cmd/zpool/zpool_main.c (mirror_zfs.git, at commit "Add ashift validation when adding devices to a pool")
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30 * Copyright (c) 2017 Datto Inc.
31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32 * Copyright (c) 2017, Intel Corporation.
33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35 * Copyright (c) 2021, Klara Inc.
36 * Copyright [2021] Hewlett Packard Enterprise Development LP
37 */
38
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <time.h>
54 #include <unistd.h>
55 #include <pwd.h>
56 #include <zone.h>
57 #include <sys/wait.h>
58 #include <zfs_prop.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/stat.h>
61 #include <sys/systeminfo.h>
62 #include <sys/fm/fs/zfs.h>
63 #include <sys/fm/util.h>
64 #include <sys/fm/protocol.h>
65 #include <sys/zfs_ioctl.h>
66 #include <sys/mount.h>
67 #include <sys/sysmacros.h>
68
69 #include <math.h>
70
71 #include <libzfs.h>
72 #include <libzutil.h>
73
74 #include "zpool_util.h"
75 #include "zfs_comutil.h"
76 #include "zfeature_common.h"
77
78 #include "statcommon.h"
79
80 libzfs_handle_t *g_zfs;
81
82 static int zpool_do_create(int, char **);
83 static int zpool_do_destroy(int, char **);
84
85 static int zpool_do_add(int, char **);
86 static int zpool_do_remove(int, char **);
87 static int zpool_do_labelclear(int, char **);
88
89 static int zpool_do_checkpoint(int, char **);
90
91 static int zpool_do_list(int, char **);
92 static int zpool_do_iostat(int, char **);
93 static int zpool_do_status(int, char **);
94
95 static int zpool_do_online(int, char **);
96 static int zpool_do_offline(int, char **);
97 static int zpool_do_clear(int, char **);
98 static int zpool_do_reopen(int, char **);
99
100 static int zpool_do_reguid(int, char **);
101
102 static int zpool_do_attach(int, char **);
103 static int zpool_do_detach(int, char **);
104 static int zpool_do_replace(int, char **);
105 static int zpool_do_split(int, char **);
106
107 static int zpool_do_initialize(int, char **);
108 static int zpool_do_scrub(int, char **);
109 static int zpool_do_resilver(int, char **);
110 static int zpool_do_trim(int, char **);
111
112 static int zpool_do_import(int, char **);
113 static int zpool_do_export(int, char **);
114
115 static int zpool_do_upgrade(int, char **);
116
117 static int zpool_do_history(int, char **);
118 static int zpool_do_events(int, char **);
119
120 static int zpool_do_get(int, char **);
121 static int zpool_do_set(int, char **);
122
123 static int zpool_do_sync(int, char **);
124
125 static int zpool_do_version(int, char **);
126
127 static int zpool_do_wait(int, char **);
128
129 static int zpool_do_help(int argc, char **argv);
130
131 static zpool_compat_status_t zpool_do_load_compat(
132 const char *, boolean_t *);
133
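/*
 * Note (editorial): because these long-option identifiers start at 1024,
 * above the range of any single character, the values getopt_long() returns
 * for the long-only options can never collide with the short option letters.
 */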
134 enum zpool_options {
135 ZPOOL_OPTION_POWER = 1024,
136 ZPOOL_OPTION_ALLOW_INUSE,
137 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
138 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH
139 };
140
141 /*
142 * These libumem hooks provide a reasonable set of defaults for the allocator's
143 * debugging facilities.
144 */
145
146 #ifdef DEBUG
147 const char *
148 _umem_debug_init(void)
149 {
150 return ("default,verbose"); /* $UMEM_DEBUG setting */
151 }
152
153 const char *
154 _umem_logging_init(void)
155 {
156 return ("fail,contents"); /* $UMEM_LOGGING setting */
157 }
158 #endif
159
160 typedef enum {
161 HELP_ADD,
162 HELP_ATTACH,
163 HELP_CLEAR,
164 HELP_CREATE,
165 HELP_CHECKPOINT,
166 HELP_DESTROY,
167 HELP_DETACH,
168 HELP_EXPORT,
169 HELP_HISTORY,
170 HELP_IMPORT,
171 HELP_IOSTAT,
172 HELP_LABELCLEAR,
173 HELP_LIST,
174 HELP_OFFLINE,
175 HELP_ONLINE,
176 HELP_REPLACE,
177 HELP_REMOVE,
178 HELP_INITIALIZE,
179 HELP_SCRUB,
180 HELP_RESILVER,
181 HELP_TRIM,
182 HELP_STATUS,
183 HELP_UPGRADE,
184 HELP_EVENTS,
185 HELP_GET,
186 HELP_SET,
187 HELP_SPLIT,
188 HELP_SYNC,
189 HELP_REGUID,
190 HELP_REOPEN,
191 HELP_VERSION,
192 HELP_WAIT
193 } zpool_help_t;
194
195
196 /*
197  * Flags for stats to display with "zpool iostat"
198 */
199 enum iostat_type {
200 IOS_DEFAULT = 0,
201 IOS_LATENCY = 1,
202 IOS_QUEUES = 2,
203 IOS_L_HISTO = 3,
204 IOS_RQ_HISTO = 4,
205 IOS_COUNT, /* always last element */
206 };
207
208 /* iostat_type entries as bitmasks */
209 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
210 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
211 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
212 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
213 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
214
215 /* Mask of all the histo bits */
216 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
217
218 /*
219 * Lookup table for iostat flags to nvlist names. Basically a list
220 * of all the nvlists a flag requires. Also specifies the order in
221 * which data gets printed in zpool iostat.
222 */
223 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
224 [IOS_L_HISTO] = {
225 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
226 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
227 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
228 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
229 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
230 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
231 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
232 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
233 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
234 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
235 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
236 NULL},
237 [IOS_LATENCY] = {
238 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
239 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
240 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
241 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
242 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
243 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
244 NULL},
245 [IOS_QUEUES] = {
246 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
247 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
248 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
249 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
250 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
251 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
252 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
253 NULL},
254 [IOS_RQ_HISTO] = {
255 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
256 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
257 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
258 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
259 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
260 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
261 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
262 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
263 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
264 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
265 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
266 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
267 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
268 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
269 NULL},
270 };
271
272
273 /*
274 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
275 * Right now, only one histo bit is ever set at one time, so we can
276 * just do a highbit64(a)
277 */
278 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
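/*
 * Worked example: if only IOS_L_HISTO_M is set, then
 * (a & IOS_ANYHISTO_M) == (1ULL << IOS_L_HISTO) == 8, highbit64(8) == 4,
 * so IOS_HISTO_IDX(a) == 3 == IOS_L_HISTO.
 */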
279
280 typedef struct zpool_command {
281 const char *name;
282 int (*func)(int, char **);
283 zpool_help_t usage;
284 } zpool_command_t;
285
286 /*
287  * Master command table.  Each zpool subcommand has a name, associated function, and
288 * usage message. The usage messages need to be internationalized, so we have
289 * to have a function to return the usage message based on a command index.
290 *
291 * These commands are organized according to how they are displayed in the usage
292 * message. An empty command (one with a NULL name) indicates an empty line in
293 * the generic usage message.
294 */
295 static zpool_command_t command_table[] = {
296 { "version", zpool_do_version, HELP_VERSION },
297 { NULL },
298 { "create", zpool_do_create, HELP_CREATE },
299 { "destroy", zpool_do_destroy, HELP_DESTROY },
300 { NULL },
301 { "add", zpool_do_add, HELP_ADD },
302 { "remove", zpool_do_remove, HELP_REMOVE },
303 { NULL },
304 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
305 { NULL },
306 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
307 { NULL },
308 { "list", zpool_do_list, HELP_LIST },
309 { "iostat", zpool_do_iostat, HELP_IOSTAT },
310 { "status", zpool_do_status, HELP_STATUS },
311 { NULL },
312 { "online", zpool_do_online, HELP_ONLINE },
313 { "offline", zpool_do_offline, HELP_OFFLINE },
314 { "clear", zpool_do_clear, HELP_CLEAR },
315 { "reopen", zpool_do_reopen, HELP_REOPEN },
316 { NULL },
317 { "attach", zpool_do_attach, HELP_ATTACH },
318 { "detach", zpool_do_detach, HELP_DETACH },
319 { "replace", zpool_do_replace, HELP_REPLACE },
320 { "split", zpool_do_split, HELP_SPLIT },
321 { NULL },
322 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
323 { "resilver", zpool_do_resilver, HELP_RESILVER },
324 { "scrub", zpool_do_scrub, HELP_SCRUB },
325 { "trim", zpool_do_trim, HELP_TRIM },
326 { NULL },
327 { "import", zpool_do_import, HELP_IMPORT },
328 { "export", zpool_do_export, HELP_EXPORT },
329 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
330 { "reguid", zpool_do_reguid, HELP_REGUID },
331 { NULL },
332 { "history", zpool_do_history, HELP_HISTORY },
333 { "events", zpool_do_events, HELP_EVENTS },
334 { NULL },
335 { "get", zpool_do_get, HELP_GET },
336 { "set", zpool_do_set, HELP_SET },
337 { "sync", zpool_do_sync, HELP_SYNC },
338 { NULL },
339 { "wait", zpool_do_wait, HELP_WAIT },
340 };
341
342 #define NCOMMAND (ARRAY_SIZE(command_table))
343
344 #define VDEV_ALLOC_CLASS_LOGS "logs"
345
346 static zpool_command_t *current_command;
347 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
348 static char history_str[HIS_MAX_RECORD_LEN];
349 static boolean_t log_history = B_TRUE;
350 static uint_t timestamp_fmt = NODATE;
351
352 static const char *
353 get_usage(zpool_help_t idx)
354 {
355 switch (idx) {
356 case HELP_ADD:
357 return (gettext("\tadd [-afgLnP] [-o property=value] "
358 "<pool> <vdev> ...\n"));
359 case HELP_ATTACH:
360 return (gettext("\tattach [-fsw] [-o property=value] "
361 "<pool> <device> <new-device>\n"));
362 case HELP_CLEAR:
363 return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
364 case HELP_CREATE:
365 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
366 "\t [-O file-system-property=value] ... \n"
367 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
368 case HELP_CHECKPOINT:
369 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
370 case HELP_DESTROY:
371 return (gettext("\tdestroy [-f] <pool>\n"));
372 case HELP_DETACH:
373 return (gettext("\tdetach <pool> <device>\n"));
374 case HELP_EXPORT:
375 return (gettext("\texport [-af] <pool> ...\n"));
376 case HELP_HISTORY:
377 return (gettext("\thistory [-il] [<pool>] ...\n"));
378 case HELP_IMPORT:
379 return (gettext("\timport [-d dir] [-D]\n"
380 "\timport [-o mntopts] [-o property=value] ... \n"
381 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
382 "[-R root] [-F [-n]] -a\n"
383 "\timport [-o mntopts] [-o property=value] ... \n"
384 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
385 "[-R root] [-F [-n]]\n"
386 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
387 case HELP_IOSTAT:
388 return (gettext("\tiostat [[[-c [script1,script2,...]"
389 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
390 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
391 " [[-n] interval [count]]\n"));
392 case HELP_LABELCLEAR:
393 return (gettext("\tlabelclear [-f] <vdev>\n"));
394 case HELP_LIST:
395 return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
396 "[-T d|u] [pool] ... \n"
397 "\t [interval [count]]\n"));
398 case HELP_OFFLINE:
399 return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
400 "<device> ...\n"));
401 case HELP_ONLINE:
402 return (gettext("\tonline [--power][-e] <pool> <device> "
403 "...\n"));
404 case HELP_REPLACE:
405 return (gettext("\treplace [-fsw] [-o property=value] "
406 "<pool> <device> [new-device]\n"));
407 case HELP_REMOVE:
408 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
409 case HELP_REOPEN:
410 return (gettext("\treopen [-n] <pool>\n"));
411 case HELP_INITIALIZE:
412 return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
413 "[<device> ...]\n"));
414 case HELP_SCRUB:
415 return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
416 case HELP_RESILVER:
417 return (gettext("\tresilver <pool> ...\n"));
418 case HELP_TRIM:
419 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
420 "[<device> ...]\n"));
421 case HELP_STATUS:
422 return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
423 "[-DegiLpPstvx] [-T d|u] [pool] ...\n"
424 "\t [interval [count]]\n"));
425 case HELP_UPGRADE:
426 return (gettext("\tupgrade\n"
427 "\tupgrade -v\n"
428 "\tupgrade [-V version] <-a | pool ...>\n"));
429 case HELP_EVENTS:
430 return (gettext("\tevents [-vHf [pool] | -c]\n"));
431 case HELP_GET:
432 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
433 "<\"all\" | property[,...]> <pool> ...\n"));
434 case HELP_SET:
435 return (gettext("\tset <property=value> <pool>\n"
436 "\tset <vdev_property=value> <pool> <vdev>\n"));
437 case HELP_SPLIT:
438 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
439 "\t [-o property=value] <pool> <newpool> "
440 "[<device> ...]\n"));
441 case HELP_REGUID:
442 return (gettext("\treguid <pool>\n"));
443 case HELP_SYNC:
444 return (gettext("\tsync [pool] ...\n"));
445 case HELP_VERSION:
446 return (gettext("\tversion\n"));
447 case HELP_WAIT:
448 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
449 "<pool> [interval]\n"));
450 default:
451 __builtin_unreachable();
452 }
453 }
454
455 static void
456 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
457 {
458 uint_t children = 0;
459 nvlist_t **child;
460 uint_t i;
461
462 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
463 &child, &children);
464
465 if (children == 0) {
466 char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
467 VDEV_NAME_PATH);
468
469 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
470 strcmp(path, VDEV_TYPE_HOLE) != 0)
471 fnvlist_add_boolean(res, path);
472
473 free(path);
474 return;
475 }
476
477 for (i = 0; i < children; i++) {
478 zpool_collect_leaves(zhp, child[i], res);
479 }
480 }
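/*
 * Illustrative result (device names hypothetical): for a pool built as a
 * mirror of sda and sdb plus a log device sdc, the 'res' nvlist gains one
 * boolean entry per leaf name as produced by zpool_vdev_name(), while
 * indirect and hole placeholder vdevs are skipped.
 */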
481
482 /*
483 * Callback routine that will print out a pool property value.
484 */
485 static int
486 print_pool_prop_cb(int prop, void *cb)
487 {
488 FILE *fp = cb;
489
490 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
491
492 if (zpool_prop_readonly(prop))
493 (void) fprintf(fp, " NO ");
494 else
495 (void) fprintf(fp, " YES ");
496
497 if (zpool_prop_values(prop) == NULL)
498 (void) fprintf(fp, "-\n");
499 else
500 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
501
502 return (ZPROP_CONT);
503 }
504
505 /*
506 * Callback routine that will print out a vdev property value.
507 */
508 static int
509 print_vdev_prop_cb(int prop, void *cb)
510 {
511 FILE *fp = cb;
512
513 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
514
515 if (vdev_prop_readonly(prop))
516 (void) fprintf(fp, " NO ");
517 else
518 (void) fprintf(fp, " YES ");
519
520 if (vdev_prop_values(prop) == NULL)
521 (void) fprintf(fp, "-\n");
522 else
523 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
524
525 return (ZPROP_CONT);
526 }
527
528 /*
529  * Given a leaf vdev name like 'L5', return its ZPOOL_CONFIG_PATH value like
530 * '/dev/disk/by-vdev/L5'.
531 */
532 static const char *
533 vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
534 {
535 nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
536 if (vdev_nv == NULL) {
537 return (NULL);
538 }
539 return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
540 }
541
542 static int
543 zpool_power_on(zpool_handle_t *zhp, char *vdev)
544 {
545 return (zpool_power(zhp, vdev, B_TRUE));
546 }
547
548 static int
549 zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
550 {
551 int rc;
552
553 rc = zpool_power_on(zhp, vdev);
554 if (rc != 0)
555 return (rc);
556
557 zpool_disk_wait(vdev_name_to_path(zhp, vdev));
558
559 return (0);
560 }
561
562 static int
563 zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
564 {
565 nvlist_t *nv;
566 const char *path = NULL;
567 int rc;
568
569 /* Power up all the devices first */
570 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
571 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
572 if (path != NULL) {
573 rc = zpool_power_on(zhp, (char *)path);
574 if (rc != 0) {
575 return (rc);
576 }
577 }
578 }
579
580 /*
581 * Wait for their devices to show up. Since we powered them on
582 * at roughly the same time, they should all come online around
583 * the same time.
584 */
585 FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
586 path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
587 zpool_disk_wait(path);
588 }
589
590 return (0);
591 }
592
593 static int
594 zpool_power_off(zpool_handle_t *zhp, char *vdev)
595 {
596 return (zpool_power(zhp, vdev, B_FALSE));
597 }
598
599 /*
600 * Display usage message. If we're inside a command, display only the usage for
601 * that command. Otherwise, iterate over the entire command table and display
602 * a complete usage message.
603 */
604 static __attribute__((noreturn)) void
605 usage(boolean_t requested)
606 {
607 FILE *fp = requested ? stdout : stderr;
608
609 if (current_command == NULL) {
610 int i;
611
612 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
613 (void) fprintf(fp,
614 gettext("where 'command' is one of the following:\n\n"));
615
616 for (i = 0; i < NCOMMAND; i++) {
617 if (command_table[i].name == NULL)
618 (void) fprintf(fp, "\n");
619 else
620 (void) fprintf(fp, "%s",
621 get_usage(command_table[i].usage));
622 }
623
624 (void) fprintf(fp,
625 gettext("\nFor further help on a command or topic, "
626 "run: %s\n"), "zpool help [<topic>]");
627 } else {
628 (void) fprintf(fp, gettext("usage:\n"));
629 (void) fprintf(fp, "%s", get_usage(current_command->usage));
630 }
631
632 if (current_command != NULL &&
633 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
634 ((strcmp(current_command->name, "set") == 0) ||
635 (strcmp(current_command->name, "get") == 0) ||
636 (strcmp(current_command->name, "list") == 0))) {
637
638 (void) fprintf(fp, "%s",
639 gettext("\nthe following properties are supported:\n"));
640
641 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
642 "PROPERTY", "EDIT", "VALUES");
643
644 /* Iterate over all properties */
645 if (current_prop_type == ZFS_TYPE_POOL) {
646 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
647 B_TRUE, current_prop_type);
648
649 (void) fprintf(fp, "\t%-19s ", "feature@...");
650 (void) fprintf(fp, "YES "
651 "disabled | enabled | active\n");
652
653 (void) fprintf(fp, gettext("\nThe feature@ properties "
654 "must be appended with a feature name.\n"
655 "See zpool-features(7).\n"));
656 } else if (current_prop_type == ZFS_TYPE_VDEV) {
657 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
658 B_TRUE, current_prop_type);
659 }
660 }
661
662 /*
663 * See comments at end of main().
664 */
665 if (getenv("ZFS_ABORT") != NULL) {
666 (void) printf("dumping core by request\n");
667 abort();
668 }
669
670 exit(requested ? 0 : 2);
671 }
672
673 /*
674 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
675 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
676 * if none specified.
677 *
678 * -c Cancel. Ends active initializing.
679 * -s Suspend. Initializing can then be restarted with no flags.
680 * -u Uninitialize. Clears initialization state.
681 * -w Wait. Blocks until initializing has completed.
682 */
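/*
 * Illustrative invocations (pool and device names are hypothetical):
 *
 *	zpool initialize tank		start on every leaf vdev
 *	zpool initialize -w tank sdb	one device, block until done
 *	zpool initialize -s tank	suspend; restart later with no flags
 *	zpool initialize -c tank	cancel an active initialization
 */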
683 int
684 zpool_do_initialize(int argc, char **argv)
685 {
686 int c;
687 char *poolname;
688 zpool_handle_t *zhp;
689 nvlist_t *vdevs;
690 int err = 0;
691 boolean_t wait = B_FALSE;
692
693 struct option long_options[] = {
694 {"cancel", no_argument, NULL, 'c'},
695 {"suspend", no_argument, NULL, 's'},
696 {"uninit", no_argument, NULL, 'u'},
697 {"wait", no_argument, NULL, 'w'},
698 {0, 0, 0, 0}
699 };
700
701 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
702 while ((c = getopt_long(argc, argv, "csuw", long_options,
703 NULL)) != -1) {
704 switch (c) {
705 case 'c':
706 if (cmd_type != POOL_INITIALIZE_START &&
707 cmd_type != POOL_INITIALIZE_CANCEL) {
708 (void) fprintf(stderr, gettext("-c cannot be "
709 "combined with other options\n"));
710 usage(B_FALSE);
711 }
712 cmd_type = POOL_INITIALIZE_CANCEL;
713 break;
714 case 's':
715 if (cmd_type != POOL_INITIALIZE_START &&
716 cmd_type != POOL_INITIALIZE_SUSPEND) {
717 (void) fprintf(stderr, gettext("-s cannot be "
718 "combined with other options\n"));
719 usage(B_FALSE);
720 }
721 cmd_type = POOL_INITIALIZE_SUSPEND;
722 break;
723 case 'u':
724 if (cmd_type != POOL_INITIALIZE_START &&
725 cmd_type != POOL_INITIALIZE_UNINIT) {
726 (void) fprintf(stderr, gettext("-u cannot be "
727 "combined with other options\n"));
728 usage(B_FALSE);
729 }
730 cmd_type = POOL_INITIALIZE_UNINIT;
731 break;
732 case 'w':
733 wait = B_TRUE;
734 break;
735 case '?':
736 if (optopt != 0) {
737 (void) fprintf(stderr,
738 gettext("invalid option '%c'\n"), optopt);
739 } else {
740 (void) fprintf(stderr,
741 gettext("invalid option '%s'\n"),
742 argv[optind - 1]);
743 }
744 usage(B_FALSE);
745 }
746 }
747
748 argc -= optind;
749 argv += optind;
750
751 if (argc < 1) {
752 (void) fprintf(stderr, gettext("missing pool name argument\n"));
753 usage(B_FALSE);
754 return (-1);
755 }
756
757 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
758 		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s "
759 "or -u\n"));
760 usage(B_FALSE);
761 }
762
763 poolname = argv[0];
764 zhp = zpool_open(g_zfs, poolname);
765 if (zhp == NULL)
766 return (-1);
767
768 vdevs = fnvlist_alloc();
769 if (argc == 1) {
770 /* no individual leaf vdevs specified, so add them all */
771 nvlist_t *config = zpool_get_config(zhp, NULL);
772 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
773 ZPOOL_CONFIG_VDEV_TREE);
774 zpool_collect_leaves(zhp, nvroot, vdevs);
775 } else {
776 for (int i = 1; i < argc; i++) {
777 fnvlist_add_boolean(vdevs, argv[i]);
778 }
779 }
780
781 if (wait)
782 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
783 else
784 err = zpool_initialize(zhp, cmd_type, vdevs);
785
786 fnvlist_free(vdevs);
787 zpool_close(zhp);
788
789 return (err);
790 }
791
792 /*
793 * print a pool vdev config for dry runs
794 */
795 static void
796 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
797 const char *match, int name_flags)
798 {
799 nvlist_t **child;
800 uint_t c, children;
801 char *vname;
802 boolean_t printed = B_FALSE;
803
804 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
805 &child, &children) != 0) {
806 if (name != NULL)
807 (void) printf("\t%*s%s\n", indent, "", name);
808 return;
809 }
810
811 for (c = 0; c < children; c++) {
812 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
813 const char *class = "";
814
815 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
816 &is_hole);
817
818 if (is_hole == B_TRUE) {
819 continue;
820 }
821
822 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
823 &is_log);
824 if (is_log)
825 class = VDEV_ALLOC_BIAS_LOG;
826 (void) nvlist_lookup_string(child[c],
827 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
828 if (strcmp(match, class) != 0)
829 continue;
830
831 if (!printed && name != NULL) {
832 (void) printf("\t%*s%s\n", indent, "", name);
833 printed = B_TRUE;
834 }
835 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
836 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
837 name_flags);
838 free(vname);
839 }
840 }
841
842 /*
843 * Print the list of l2cache devices for dry runs.
844 */
845 static void
846 print_cache_list(nvlist_t *nv, int indent)
847 {
848 nvlist_t **child;
849 uint_t c, children;
850
851 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
852 &child, &children) == 0 && children > 0) {
853 (void) printf("\t%*s%s\n", indent, "", "cache");
854 } else {
855 return;
856 }
857 for (c = 0; c < children; c++) {
858 char *vname;
859
860 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
861 (void) printf("\t%*s%s\n", indent + 2, "", vname);
862 free(vname);
863 }
864 }
865
866 /*
867 * Print the list of spares for dry runs.
868 */
869 static void
870 print_spare_list(nvlist_t *nv, int indent)
871 {
872 nvlist_t **child;
873 uint_t c, children;
874
875 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
876 &child, &children) == 0 && children > 0) {
877 (void) printf("\t%*s%s\n", indent, "", "spares");
878 } else {
879 return;
880 }
881 for (c = 0; c < children; c++) {
882 char *vname;
883
884 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
885 (void) printf("\t%*s%s\n", indent + 2, "", vname);
886 free(vname);
887 }
888 }
889
890 static boolean_t
891 prop_list_contains_feature(nvlist_t *proplist)
892 {
893 nvpair_t *nvp;
894 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
895 nvp = nvlist_next_nvpair(proplist, nvp)) {
896 if (zpool_prop_feature(nvpair_name(nvp)))
897 return (B_TRUE);
898 }
899 return (B_FALSE);
900 }
901
902 /*
903 * Add a property pair (name, string-value) into a property nvlist.
904 */
905 static int
906 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
907 boolean_t poolprop)
908 {
909 zpool_prop_t prop = ZPOOL_PROP_INVAL;
910 nvlist_t *proplist;
911 const char *normnm;
912 const char *strval;
913
914 if (*props == NULL &&
915 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
916 (void) fprintf(stderr,
917 gettext("internal error: out of memory\n"));
918 return (1);
919 }
920
921 proplist = *props;
922
923 if (poolprop) {
924 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
925 const char *cname =
926 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
927
928 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
929 (!zpool_prop_feature(propname) &&
930 !zpool_prop_vdev(propname))) {
931 (void) fprintf(stderr, gettext("property '%s' is "
932 "not a valid pool or vdev property\n"), propname);
933 return (2);
934 }
935
936 /*
937 * feature@ properties and version should not be specified
938 * at the same time.
939 */
940 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
941 nvlist_exists(proplist, vname)) ||
942 (prop == ZPOOL_PROP_VERSION &&
943 prop_list_contains_feature(proplist))) {
944 (void) fprintf(stderr, gettext("'feature@' and "
945 "'version' properties cannot be specified "
946 "together\n"));
947 return (2);
948 }
949
950 /*
951 * if version is specified, only "legacy" compatibility
952 * may be requested
953 */
954 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
955 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
956 nvlist_exists(proplist, vname)) ||
957 (prop == ZPOOL_PROP_VERSION &&
958 nvlist_exists(proplist, cname) &&
959 strcmp(fnvlist_lookup_string(proplist, cname),
960 ZPOOL_COMPAT_LEGACY) != 0)) {
961 (void) fprintf(stderr, gettext("when 'version' is "
962 "specified, the 'compatibility' feature may only "
963 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
964 return (2);
965 }
966
967 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
968 normnm = propname;
969 else
970 normnm = zpool_prop_to_name(prop);
971 } else {
972 zfs_prop_t fsprop = zfs_name_to_prop(propname);
973
974 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
975 B_FALSE)) {
976 normnm = zfs_prop_to_name(fsprop);
977 } else if (zfs_prop_user(propname) ||
978 zfs_prop_userquota(propname)) {
979 normnm = propname;
980 } else {
981 (void) fprintf(stderr, gettext("property '%s' is "
982 "not a valid filesystem property\n"), propname);
983 return (2);
984 }
985 }
986
987 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
988 prop != ZPOOL_PROP_CACHEFILE) {
989 (void) fprintf(stderr, gettext("property '%s' "
990 "specified multiple times\n"), propname);
991 return (2);
992 }
993
994 if (nvlist_add_string(proplist, normnm, propval) != 0) {
995 (void) fprintf(stderr, gettext("internal "
996 "error: out of memory\n"));
997 return (1);
998 }
999
1000 return (0);
1001 }
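/*
 * Usage sketch (value hypothetical): zpool_do_add() calls
 * add_prop_list("ashift", "12", &props, B_TRUE); the name is validated as a
 * pool property, normalized via zpool_prop_to_name(), and "12" is stored as a
 * string in *props, with the nvlist allocated on first use.
 */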
1002
1003 /*
1004 * Set a default property pair (name, string-value) in a property nvlist
1005 */
1006 static int
1007 add_prop_list_default(const char *propname, const char *propval,
1008 nvlist_t **props)
1009 {
1010 const char *pval;
1011
1012 if (nvlist_lookup_string(*props, propname, &pval) == 0)
1013 return (0);
1014
1015 return (add_prop_list(propname, propval, props, B_TRUE));
1016 }
1017
1018 /*
1019 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
1020 *
1021 * -a Disable the ashift validation checks
1022 * -f Force addition of devices, even if they appear in use
1023 * -g Display guid for individual vdev name.
1024 * -L Follow links when resolving vdev path name.
1025 * -n Do not add the devices, but display the resulting layout if
1026 * they were to be added.
1027  * -o	Set property=value.  Only the "ashift" property may be set here.
1028 * -P Display full path for vdev name.
1029 *
1030 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
1031 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
1032 * libzfs.
1033 */
1034 int
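/*
 * Illustrative invocations (pool and device names are hypothetical):
 *
 *	zpool add -n tank mirror sdc sdd	dry run; print resulting layout
 *	zpool add -o ashift=12 tank sde		override ashift for the new vdev
 *	zpool add --allow-ashift-mismatch tank sdf
 *
 * The long options --allow-in-use, --allow-replication-mismatch and
 * --allow-ashift-mismatch (see long_options below) each relax exactly one of
 * the checks that '-f' disables wholesale, and none of them may be combined
 * with '-f'.
 */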
1035 zpool_do_add(int argc, char **argv)
1036 {
1037 boolean_t check_replication = B_TRUE;
1038 boolean_t check_inuse = B_TRUE;
1039 boolean_t dryrun = B_FALSE;
1040 boolean_t check_ashift = B_TRUE;
1041 boolean_t force = B_FALSE;
1042 int name_flags = 0;
1043 int c;
1044 nvlist_t *nvroot;
1045 char *poolname;
1046 int ret;
1047 zpool_handle_t *zhp;
1048 nvlist_t *config;
1049 nvlist_t *props = NULL;
1050 char *propval;
1051
1052 struct option long_options[] = {
1053 {"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
1054 {"allow-replication-mismatch", no_argument, NULL,
1055 ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
1056 {"allow-ashift-mismatch", no_argument, NULL,
1057 ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
1058 {0, 0, 0, 0}
1059 };
1060
1061 /* check options */
1062 while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
1063 != -1) {
1064 switch (c) {
1065 case 'f':
1066 force = B_TRUE;
1067 break;
1068 case 'g':
1069 name_flags |= VDEV_NAME_GUID;
1070 break;
1071 case 'L':
1072 name_flags |= VDEV_NAME_FOLLOW_LINKS;
1073 break;
1074 case 'n':
1075 dryrun = B_TRUE;
1076 break;
1077 case 'o':
1078 if ((propval = strchr(optarg, '=')) == NULL) {
1079 (void) fprintf(stderr, gettext("missing "
1080 "'=' for -o option\n"));
1081 usage(B_FALSE);
1082 }
1083 *propval = '\0';
1084 propval++;
1085
1086 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
1087 (add_prop_list(optarg, propval, &props, B_TRUE)))
1088 usage(B_FALSE);
1089 break;
1090 case 'P':
1091 name_flags |= VDEV_NAME_PATH;
1092 break;
1093 case ZPOOL_OPTION_ALLOW_INUSE:
1094 check_inuse = B_FALSE;
1095 break;
1096 case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
1097 check_replication = B_FALSE;
1098 break;
1099 case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
1100 check_ashift = B_FALSE;
1101 break;
1102 case '?':
1103 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1104 optopt);
1105 usage(B_FALSE);
1106 }
1107 }
1108
1109 argc -= optind;
1110 argv += optind;
1111
1112 /* get pool name and check number of arguments */
1113 if (argc < 1) {
1114 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1115 usage(B_FALSE);
1116 }
1117 if (argc < 2) {
1118 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1119 usage(B_FALSE);
1120 }
1121
1122 if (force) {
1123 if (!check_inuse || !check_replication || !check_ashift) {
1124 (void) fprintf(stderr, gettext("'-f' option is not "
1125 "allowed with '--allow-replication-mismatch', "
1126 "'--allow-ashift-mismatch', or "
1127 "'--allow-in-use'\n"));
1128 usage(B_FALSE);
1129 }
1130 check_inuse = B_FALSE;
1131 check_replication = B_FALSE;
1132 check_ashift = B_FALSE;
1133 }
1134
1135 poolname = argv[0];
1136
1137 argc--;
1138 argv++;
1139
1140 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1141 return (1);
1142
1143 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1144 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1145 poolname);
1146 zpool_close(zhp);
1147 return (1);
1148 }
1149
1150 /* unless manually specified use "ashift" pool property (if set) */
1151 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1152 int intval;
1153 zprop_source_t src;
1154 char strval[ZPOOL_MAXPROPLEN];
1155
1156 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1157 if (src != ZPROP_SRC_DEFAULT) {
1158 (void) sprintf(strval, "%" PRId32, intval);
1159 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1160 &props, B_TRUE) == 0);
1161 }
1162 }
1163
1164 /* pass off to make_root_vdev for processing */
1165 nvroot = make_root_vdev(zhp, props, !check_inuse,
1166 check_replication, B_FALSE, dryrun, argc, argv);
1167 if (nvroot == NULL) {
1168 zpool_close(zhp);
1169 return (1);
1170 }
1171
1172 if (dryrun) {
1173 nvlist_t *poolnvroot;
1174 nvlist_t **l2child, **sparechild;
1175 uint_t l2children, sparechildren, c;
1176 char *vname;
1177 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1178
1179 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1180 &poolnvroot) == 0);
1181
1182 (void) printf(gettext("would update '%s' to the following "
1183 "configuration:\n\n"), zpool_get_name(zhp));
1184
1185 /* print original main pool and new tree */
1186 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1187 name_flags | VDEV_NAME_TYPE_ID);
1188 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1189
1190 /* print other classes: 'dedup', 'special', and 'log' */
1191 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1192 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1193 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1194 print_vdev_tree(zhp, NULL, nvroot, 0,
1195 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1196 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1197 print_vdev_tree(zhp, "dedup", nvroot, 0,
1198 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1199 }
1200
1201 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1202 print_vdev_tree(zhp, "special", poolnvroot, 0,
1203 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1204 print_vdev_tree(zhp, NULL, nvroot, 0,
1205 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1206 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1207 print_vdev_tree(zhp, "special", nvroot, 0,
1208 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1209 }
1210
1211 if (num_logs(poolnvroot) > 0) {
1212 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1213 VDEV_ALLOC_BIAS_LOG, name_flags);
1214 print_vdev_tree(zhp, NULL, nvroot, 0,
1215 VDEV_ALLOC_BIAS_LOG, name_flags);
1216 } else if (num_logs(nvroot) > 0) {
1217 print_vdev_tree(zhp, "logs", nvroot, 0,
1218 VDEV_ALLOC_BIAS_LOG, name_flags);
1219 }
1220
1221 /* Do the same for the caches */
1222 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1223 &l2child, &l2children) == 0 && l2children) {
1224 hadcache = B_TRUE;
1225 (void) printf(gettext("\tcache\n"));
1226 for (c = 0; c < l2children; c++) {
1227 vname = zpool_vdev_name(g_zfs, NULL,
1228 l2child[c], name_flags);
1229 (void) printf("\t %s\n", vname);
1230 free(vname);
1231 }
1232 }
1233 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1234 &l2child, &l2children) == 0 && l2children) {
1235 if (!hadcache)
1236 (void) printf(gettext("\tcache\n"));
1237 for (c = 0; c < l2children; c++) {
1238 vname = zpool_vdev_name(g_zfs, NULL,
1239 l2child[c], name_flags);
1240 (void) printf("\t %s\n", vname);
1241 free(vname);
1242 }
1243 }
1244 /* And finally the spares */
1245 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1246 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1247 hadspare = B_TRUE;
1248 (void) printf(gettext("\tspares\n"));
1249 for (c = 0; c < sparechildren; c++) {
1250 vname = zpool_vdev_name(g_zfs, NULL,
1251 sparechild[c], name_flags);
1252 (void) printf("\t %s\n", vname);
1253 free(vname);
1254 }
1255 }
1256 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1257 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1258 if (!hadspare)
1259 (void) printf(gettext("\tspares\n"));
1260 for (c = 0; c < sparechildren; c++) {
1261 vname = zpool_vdev_name(g_zfs, NULL,
1262 sparechild[c], name_flags);
1263 (void) printf("\t %s\n", vname);
1264 free(vname);
1265 }
1266 }
1267
1268 ret = 0;
1269 } else {
1270 ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
1271 }
1272
1273 nvlist_free(props);
1274 nvlist_free(nvroot);
1275 zpool_close(zhp);
1276
1277 return (ret);
1278 }
1279
1280 /*
1281 * zpool remove [-npsw] <pool> <vdev> ...
1282 *
1283 * Removes the given vdev from the pool.
1284 */
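/*
 * Option summary (derived from the handling below, not in the original
 * block comment):
 *
 *	-n	dry run: report the memory that would be needed to remap each
 *		device after removal, without removing anything
 *	-p	print sizes as exact (parsable) numbers
 *	-s	stop (cancel) an in-progress removal
 *	-w	wait for the removal to complete before returning
 */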
1285 int
1286 zpool_do_remove(int argc, char **argv)
1287 {
1288 char *poolname;
1289 int i, ret = 0;
1290 zpool_handle_t *zhp = NULL;
1291 boolean_t stop = B_FALSE;
1292 int c;
1293 boolean_t noop = B_FALSE;
1294 boolean_t parsable = B_FALSE;
1295 boolean_t wait = B_FALSE;
1296
1297 /* check options */
1298 while ((c = getopt(argc, argv, "npsw")) != -1) {
1299 switch (c) {
1300 case 'n':
1301 noop = B_TRUE;
1302 break;
1303 case 'p':
1304 parsable = B_TRUE;
1305 break;
1306 case 's':
1307 stop = B_TRUE;
1308 break;
1309 case 'w':
1310 wait = B_TRUE;
1311 break;
1312 case '?':
1313 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1314 optopt);
1315 usage(B_FALSE);
1316 }
1317 }
1318
1319 argc -= optind;
1320 argv += optind;
1321
1322 /* get pool name and check number of arguments */
1323 if (argc < 1) {
1324 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1325 usage(B_FALSE);
1326 }
1327
1328 poolname = argv[0];
1329
1330 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1331 return (1);
1332
1333 if (stop && noop) {
1334 zpool_close(zhp);
1335 (void) fprintf(stderr, gettext("stop request ignored\n"));
1336 return (0);
1337 }
1338
1339 if (stop) {
1340 if (argc > 1) {
1341 (void) fprintf(stderr, gettext("too many arguments\n"));
1342 usage(B_FALSE);
1343 }
1344 if (zpool_vdev_remove_cancel(zhp) != 0)
1345 ret = 1;
1346 if (wait) {
1347 (void) fprintf(stderr, gettext("invalid option "
1348 "combination: -w cannot be used with -s\n"));
1349 usage(B_FALSE);
1350 }
1351 } else {
1352 if (argc < 2) {
1353 (void) fprintf(stderr, gettext("missing device\n"));
1354 usage(B_FALSE);
1355 }
1356
1357 for (i = 1; i < argc; i++) {
1358 if (noop) {
1359 uint64_t size;
1360
1361 if (zpool_vdev_indirect_size(zhp, argv[i],
1362 &size) != 0) {
1363 ret = 1;
1364 break;
1365 }
1366 if (parsable) {
1367 (void) printf("%s %llu\n",
1368 argv[i], (unsigned long long)size);
1369 } else {
1370 char valstr[32];
1371 zfs_nicenum(size, valstr,
1372 sizeof (valstr));
1373 (void) printf("Memory that will be "
1374 "used after removing %s: %s\n",
1375 argv[i], valstr);
1376 }
1377 } else {
1378 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1379 ret = 1;
1380 }
1381 }
1382
1383 if (ret == 0 && wait)
1384 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1385 }
1386 zpool_close(zhp);
1387
1388 return (ret);
1389 }
1390
1391 /*
1392 * Return 1 if a vdev is active (being used in a pool)
1393 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
1394 *
1395 * This is useful for checking if a disk in an active pool is offlined or
1396 * faulted.
1397 */
1398 static int
1399 vdev_is_active(char *vdev_path)
1400 {
1401 int fd;
1402 fd = open(vdev_path, O_EXCL);
1403 if (fd < 0) {
1404 		return (1); /* can't open O_EXCL - disk is active */
1405 }
1406
1407 close(fd);
1408 return (0); /* disk is inactive in the pool */
1409 }
1410
1411 /*
1412 * zpool labelclear [-f] <vdev>
1413 *
1414  * -f	Force clearing the label for vdevs that are members of
1415  *	exported or foreign pools.
1416 *
1417 * Verifies that the vdev is not active and zeros out the label information
1418 * on the device.
1419 */
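/*
 * Illustrative invocations (device names are hypothetical):
 *
 *	zpool labelclear /dev/sdc1	refused if the device is in use
 *	zpool labelclear -f sdc		short name is resolved; '-f' overrides
 *					membership in an exported or foreign pool
 */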
1420 int
1421 zpool_do_labelclear(int argc, char **argv)
1422 {
1423 char vdev[MAXPATHLEN];
1424 char *name = NULL;
1425 int c, fd = -1, ret = 0;
1426 nvlist_t *config;
1427 pool_state_t state;
1428 boolean_t inuse = B_FALSE;
1429 boolean_t force = B_FALSE;
1430
1431 /* check options */
1432 while ((c = getopt(argc, argv, "f")) != -1) {
1433 switch (c) {
1434 case 'f':
1435 force = B_TRUE;
1436 break;
1437 default:
1438 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1439 optopt);
1440 usage(B_FALSE);
1441 }
1442 }
1443
1444 argc -= optind;
1445 argv += optind;
1446
1447 /* get vdev name */
1448 if (argc < 1) {
1449 (void) fprintf(stderr, gettext("missing vdev name\n"));
1450 usage(B_FALSE);
1451 }
1452 if (argc > 1) {
1453 (void) fprintf(stderr, gettext("too many arguments\n"));
1454 usage(B_FALSE);
1455 }
1456
1457 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1458
1459 /*
1460 * If we cannot open an absolute path, we quit.
1461 * Otherwise if the provided vdev name doesn't point to a file,
1462  * try prepending expected disk paths and appending partition numbers.
1463 */
1464 if ((fd = open(vdev, O_RDWR)) < 0) {
1465 int error;
1466 if (vdev[0] == '/') {
1467 (void) fprintf(stderr, gettext("failed to open "
1468 "%s: %s\n"), vdev, strerror(errno));
1469 return (1);
1470 }
1471
1472 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1473 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1474 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1475 error = ENOENT;
1476 }
1477
1478 if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
1479 if (errno == ENOENT) {
1480 (void) fprintf(stderr, gettext(
1481 "failed to find device %s, try "
1482 "specifying absolute path instead\n"),
1483 argv[0]);
1484 return (1);
1485 }
1486
1487 (void) fprintf(stderr, gettext("failed to open %s:"
1488 " %s\n"), vdev, strerror(errno));
1489 return (1);
1490 }
1491 }
1492
1493 /*
1494 * Flush all dirty pages for the block device. This should not be
1495 * fatal when the device does not support BLKFLSBUF as would be the
1496 * case for a file vdev.
1497 */
1498 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1499 (void) fprintf(stderr, gettext("failed to invalidate "
1500 "cache for %s: %s\n"), vdev, strerror(errno));
1501
1502 if (zpool_read_label(fd, &config, NULL) != 0) {
1503 (void) fprintf(stderr,
1504 gettext("failed to read label from %s\n"), vdev);
1505 ret = 1;
1506 goto errout;
1507 }
1508 nvlist_free(config);
1509
1510 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1511 if (ret != 0) {
1512 (void) fprintf(stderr,
1513 gettext("failed to check state for %s\n"), vdev);
1514 ret = 1;
1515 goto errout;
1516 }
1517
1518 if (!inuse)
1519 goto wipe_label;
1520
1521 switch (state) {
1522 default:
1523 case POOL_STATE_ACTIVE:
1524 case POOL_STATE_SPARE:
1525 case POOL_STATE_L2CACHE:
1526 /*
1527 		 * We allow the user to call 'zpool labelclear -f'
1528 		 * on an offlined disk in an active pool. We can check if
1529 * the disk is online by calling vdev_is_active().
1530 */
1531 if (force && !vdev_is_active(vdev))
1532 break;
1533
1534 (void) fprintf(stderr, gettext(
1535 "%s is a member (%s) of pool \"%s\""),
1536 vdev, zpool_pool_state_to_name(state), name);
1537
1538 if (force) {
1539 (void) fprintf(stderr, gettext(
1540 ". Offline the disk first to clear its label."));
1541 }
1542 printf("\n");
1543 ret = 1;
1544 goto errout;
1545
1546 case POOL_STATE_EXPORTED:
1547 if (force)
1548 break;
1549 (void) fprintf(stderr, gettext(
1550 "use '-f' to override the following error:\n"
1551 "%s is a member of exported pool \"%s\"\n"),
1552 vdev, name);
1553 ret = 1;
1554 goto errout;
1555
1556 case POOL_STATE_POTENTIALLY_ACTIVE:
1557 if (force)
1558 break;
1559 (void) fprintf(stderr, gettext(
1560 "use '-f' to override the following error:\n"
1561 "%s is a member of potentially active pool \"%s\"\n"),
1562 vdev, name);
1563 ret = 1;
1564 goto errout;
1565
1566 case POOL_STATE_DESTROYED:
1567 /* inuse should never be set for a destroyed pool */
1568 assert(0);
1569 break;
1570 }
1571
1572 wipe_label:
1573 ret = zpool_clear_label(fd);
1574 if (ret != 0) {
1575 (void) fprintf(stderr,
1576 gettext("failed to clear label for %s\n"), vdev);
1577 }
1578
1579 errout:
1580 free(name);
1581 (void) close(fd);
1582
1583 return (ret);
1584 }
1585
1586 /*
1587 * zpool create [-fnd] [-o property=value] ...
1588 * [-O file-system-property=value] ...
1589 * [-R root] [-m mountpoint] <pool> <dev> ...
1590 *
1591 * -f Force creation, even if devices appear in use
1592 * -n Do not create the pool, but display the resulting layout if it
1593 * were to be created.
1594 * -R Create a pool under an alternate root
1595 * -m Set default mountpoint for the root dataset. By default it's
1596 * '/<pool>'
1597 * -o Set property=value.
1598 * -o Set feature@feature=enabled|disabled.
1599 * -d Don't automatically enable all supported pool features
1600 * (individual features can be enabled with -o).
1601 * -O Set fsproperty=value in the pool's root file system
1602 *
1603 * Creates the named pool according to the given vdev specification. The
1604 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1605 * Once we get the nvlist back from make_root_vdev(), we either print out the
1606 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
1607 */
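/*
 * Illustrative invocations (pool, device and property values hypothetical):
 *
 *	zpool create -n tank mirror sda sdb
 *	zpool create -o ashift=12 -O compression=on -m /export/tank \
 *	    tank raidz sdc sdd sde
 *	zpool create -d -o feature@async_destroy=enabled tank sdf
 */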
1608 int
1609 zpool_do_create(int argc, char **argv)
1610 {
1611 boolean_t force = B_FALSE;
1612 boolean_t dryrun = B_FALSE;
1613 boolean_t enable_pool_features = B_TRUE;
1614
1615 int c;
1616 nvlist_t *nvroot = NULL;
1617 char *poolname;
1618 char *tname = NULL;
1619 int ret = 1;
1620 char *altroot = NULL;
1621 char *compat = NULL;
1622 char *mountpoint = NULL;
1623 nvlist_t *fsprops = NULL;
1624 nvlist_t *props = NULL;
1625 char *propval;
1626
1627 /* check options */
1628 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1629 switch (c) {
1630 case 'f':
1631 force = B_TRUE;
1632 break;
1633 case 'n':
1634 dryrun = B_TRUE;
1635 break;
1636 case 'd':
1637 enable_pool_features = B_FALSE;
1638 break;
1639 case 'R':
1640 altroot = optarg;
1641 if (add_prop_list(zpool_prop_to_name(
1642 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1643 goto errout;
1644 if (add_prop_list_default(zpool_prop_to_name(
1645 ZPOOL_PROP_CACHEFILE), "none", &props))
1646 goto errout;
1647 break;
1648 case 'm':
1649 /* Equivalent to -O mountpoint=optarg */
1650 mountpoint = optarg;
1651 break;
1652 case 'o':
1653 if ((propval = strchr(optarg, '=')) == NULL) {
1654 (void) fprintf(stderr, gettext("missing "
1655 "'=' for -o option\n"));
1656 goto errout;
1657 }
1658 *propval = '\0';
1659 propval++;
1660
1661 if (add_prop_list(optarg, propval, &props, B_TRUE))
1662 goto errout;
1663
1664 /*
1665 * If the user is creating a pool that doesn't support
1666 * feature flags, don't enable any features.
1667 */
1668 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1669 char *end;
1670 u_longlong_t ver;
1671
1672 ver = strtoull(propval, &end, 10);
1673 if (*end == '\0' &&
1674 ver < SPA_VERSION_FEATURES) {
1675 enable_pool_features = B_FALSE;
1676 }
1677 }
1678 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1679 altroot = propval;
1680 if (zpool_name_to_prop(optarg) ==
1681 ZPOOL_PROP_COMPATIBILITY)
1682 compat = propval;
1683 break;
1684 case 'O':
1685 if ((propval = strchr(optarg, '=')) == NULL) {
1686 (void) fprintf(stderr, gettext("missing "
1687 "'=' for -O option\n"));
1688 goto errout;
1689 }
1690 *propval = '\0';
1691 propval++;
1692
1693 /*
1694 * Mountpoints are checked and then added later.
1695 * Uniquely among properties, they can be specified
1696 * more than once, to avoid conflict with -m.
1697 */
1698 if (0 == strcmp(optarg,
1699 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1700 mountpoint = propval;
1701 } else if (add_prop_list(optarg, propval, &fsprops,
1702 B_FALSE)) {
1703 goto errout;
1704 }
1705 break;
1706 case 't':
1707 /*
1708 * Sanity check temporary pool name.
1709 */
1710 if (strchr(optarg, '/') != NULL) {
1711 (void) fprintf(stderr, gettext("cannot create "
1712 "'%s': invalid character '/' in temporary "
1713 "name\n"), optarg);
1714 (void) fprintf(stderr, gettext("use 'zfs "
1715 "create' to create a dataset\n"));
1716 goto errout;
1717 }
1718
1719 if (add_prop_list(zpool_prop_to_name(
1720 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1721 goto errout;
1722 if (add_prop_list_default(zpool_prop_to_name(
1723 ZPOOL_PROP_CACHEFILE), "none", &props))
1724 goto errout;
1725 tname = optarg;
1726 break;
1727 case ':':
1728 (void) fprintf(stderr, gettext("missing argument for "
1729 "'%c' option\n"), optopt);
1730 goto badusage;
1731 case '?':
1732 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1733 optopt);
1734 goto badusage;
1735 }
1736 }
1737
1738 argc -= optind;
1739 argv += optind;
1740
1741 /* get pool name and check number of arguments */
1742 if (argc < 1) {
1743 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1744 goto badusage;
1745 }
1746 if (argc < 2) {
1747 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1748 goto badusage;
1749 }
1750
1751 poolname = argv[0];
1752
1753 /*
1754 * As a special case, check for use of '/' in the name, and direct the
1755 * user to use 'zfs create' instead.
1756 */
1757 if (strchr(poolname, '/') != NULL) {
1758 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
1759 "character '/' in pool name\n"), poolname);
1760 (void) fprintf(stderr, gettext("use 'zfs create' to "
1761 "create a dataset\n"));
1762 goto errout;
1763 }
1764
1765 /* pass off to make_root_vdev for bulk processing */
1766 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1767 argc - 1, argv + 1);
1768 if (nvroot == NULL)
1769 goto errout;
1770
1771 /* make_root_vdev() allows 0 toplevel children if there are spares */
1772 if (!zfs_allocatable_devs(nvroot)) {
1773 (void) fprintf(stderr, gettext("invalid vdev "
1774 "specification: at least one toplevel vdev must be "
1775 "specified\n"));
1776 goto errout;
1777 }
1778
1779 if (altroot != NULL && altroot[0] != '/') {
1780 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
1781 "must be an absolute path\n"), altroot);
1782 goto errout;
1783 }
1784
1785 /*
1786 * Check the validity of the mountpoint and direct the user to use the
1787  * '-m' mountpoint option if it looks like it's in use.
1788 */
1789 if (mountpoint == NULL ||
1790 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1791 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1792 char buf[MAXPATHLEN];
1793 DIR *dirp;
1794
1795 if (mountpoint && mountpoint[0] != '/') {
1796 (void) fprintf(stderr, gettext("invalid mountpoint "
1797 "'%s': must be an absolute path, 'legacy', or "
1798 "'none'\n"), mountpoint);
1799 goto errout;
1800 }
1801
1802 if (mountpoint == NULL) {
1803 if (altroot != NULL)
1804 (void) snprintf(buf, sizeof (buf), "%s/%s",
1805 altroot, poolname);
1806 else
1807 (void) snprintf(buf, sizeof (buf), "/%s",
1808 poolname);
1809 } else {
1810 if (altroot != NULL)
1811 (void) snprintf(buf, sizeof (buf), "%s%s",
1812 altroot, mountpoint);
1813 else
1814 (void) snprintf(buf, sizeof (buf), "%s",
1815 mountpoint);
1816 }
1817
1818 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1819 (void) fprintf(stderr, gettext("mountpoint '%s' : "
1820 "%s\n"), buf, strerror(errno));
1821 (void) fprintf(stderr, gettext("use '-m' "
1822 "option to provide a different default\n"));
1823 goto errout;
1824 } else if (dirp) {
1825 int count = 0;
1826
1827 while (count < 3 && readdir(dirp) != NULL)
1828 count++;
1829 (void) closedir(dirp);
1830
1831 if (count > 2) {
1832 (void) fprintf(stderr, gettext("mountpoint "
1833 "'%s' exists and is not empty\n"), buf);
1834 (void) fprintf(stderr, gettext("use '-m' "
1835 "option to provide a "
1836 "different default\n"));
1837 goto errout;
1838 }
1839 }
1840 }
1841
1842 /*
1843 * Now that the mountpoint's validity has been checked, ensure that
1844 * the property is set appropriately prior to creating the pool.
1845 */
1846 if (mountpoint != NULL) {
1847 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1848 mountpoint, &fsprops, B_FALSE);
1849 if (ret != 0)
1850 goto errout;
1851 }
1852
1853 ret = 1;
1854 if (dryrun) {
1855 /*
1856 * For a dry run invocation, print out a basic message and run
1857 * through all the vdevs in the list and print out in an
1858 * appropriate hierarchy.
1859 */
1860 (void) printf(gettext("would create '%s' with the "
1861 "following layout:\n\n"), poolname);
1862
1863 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1864 print_vdev_tree(NULL, "dedup", nvroot, 0,
1865 VDEV_ALLOC_BIAS_DEDUP, 0);
1866 print_vdev_tree(NULL, "special", nvroot, 0,
1867 VDEV_ALLOC_BIAS_SPECIAL, 0);
1868 print_vdev_tree(NULL, "logs", nvroot, 0,
1869 VDEV_ALLOC_BIAS_LOG, 0);
1870 print_cache_list(nvroot, 0);
1871 print_spare_list(nvroot, 0);
1872
1873 ret = 0;
1874 } else {
1875 /*
1876 * Load in feature set.
1877 * Note: if compatibility property not given, we'll have
1878 * NULL, which means 'all features'.
1879 */
1880 boolean_t requested_features[SPA_FEATURES];
1881 if (zpool_do_load_compat(compat, requested_features) !=
1882 ZPOOL_COMPATIBILITY_OK)
1883 goto errout;
1884
1885 /*
1886 * props contains list of features to enable.
1887 * For each feature:
1888 * - remove it if feature@name=disabled
1889 * - leave it there if feature@name=enabled
1890 * - add it if:
1891 * - enable_pool_features (ie: no '-d' or '-o version')
1892 * - it's supported by the kernel module
1893 * - it's in the requested feature set
1894 * - warn if it's enabled but not in compat
1895 */
1896 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1897 char propname[MAXPATHLEN];
1898 const char *propval;
1899 zfeature_info_t *feat = &spa_feature_table[i];
1900
1901 (void) snprintf(propname, sizeof (propname),
1902 "feature@%s", feat->fi_uname);
1903
1904 if (!nvlist_lookup_string(props, propname, &propval)) {
1905 if (strcmp(propval,
1906 ZFS_FEATURE_DISABLED) == 0) {
1907 (void) nvlist_remove_all(props,
1908 propname);
1909 } else if (strcmp(propval,
1910 ZFS_FEATURE_ENABLED) == 0 &&
1911 !requested_features[i]) {
1912 (void) fprintf(stderr, gettext(
1913 "Warning: feature \"%s\" enabled "
1914 "but is not in specified "
1915 "'compatibility' feature set.\n"),
1916 feat->fi_uname);
1917 }
1918 } else if (
1919 enable_pool_features &&
1920 feat->fi_zfs_mod_supported &&
1921 requested_features[i]) {
1922 ret = add_prop_list(propname,
1923 ZFS_FEATURE_ENABLED, &props, B_TRUE);
1924 if (ret != 0)
1925 goto errout;
1926 }
1927 }
1928
1929 ret = 1;
1930 if (zpool_create(g_zfs, poolname,
1931 nvroot, props, fsprops) == 0) {
1932 zfs_handle_t *pool = zfs_open(g_zfs,
1933 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1934 if (pool != NULL) {
1935 if (zfs_mount(pool, NULL, 0) == 0) {
1936 ret = zfs_share(pool, NULL);
1937 zfs_commit_shares(NULL);
1938 }
1939 zfs_close(pool);
1940 }
1941 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1942 (void) fprintf(stderr, gettext("pool name may have "
1943 "been omitted\n"));
1944 }
1945 }
1946
1947 errout:
1948 nvlist_free(nvroot);
1949 nvlist_free(fsprops);
1950 nvlist_free(props);
1951 return (ret);
1952 badusage:
1953 nvlist_free(fsprops);
1954 nvlist_free(props);
1955 usage(B_FALSE);
1956 return (2);
1957 }
1958
1959 /*
1960  * zpool destroy [-f] <pool>
1961 *
1962 * -f Forcefully unmount any datasets
1963 *
1964 * Destroy the given pool. Automatically unmounts any datasets in the pool.
1965 */
1966 int
1967 zpool_do_destroy(int argc, char **argv)
1968 {
1969 boolean_t force = B_FALSE;
1970 int c;
1971 char *pool;
1972 zpool_handle_t *zhp;
1973 int ret;
1974
1975 /* check options */
1976 while ((c = getopt(argc, argv, "f")) != -1) {
1977 switch (c) {
1978 case 'f':
1979 force = B_TRUE;
1980 break;
1981 case '?':
1982 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1983 optopt);
1984 usage(B_FALSE);
1985 }
1986 }
1987
1988 argc -= optind;
1989 argv += optind;
1990
1991 /* check arguments */
1992 if (argc < 1) {
1993 (void) fprintf(stderr, gettext("missing pool argument\n"));
1994 usage(B_FALSE);
1995 }
1996 if (argc > 1) {
1997 (void) fprintf(stderr, gettext("too many arguments\n"));
1998 usage(B_FALSE);
1999 }
2000
2001 pool = argv[0];
2002
2003 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
2004 /*
2005 * As a special case, check for use of '/' in the name, and
2006 * direct the user to use 'zfs destroy' instead.
2007 */
2008 if (strchr(pool, '/') != NULL)
2009 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
2010 "destroy a dataset\n"));
2011 return (1);
2012 }
2013
2014 if (zpool_disable_datasets(zhp, force) != 0) {
2015 (void) fprintf(stderr, gettext("could not destroy '%s': "
2016 "could not unmount datasets\n"), zpool_get_name(zhp));
2017 zpool_close(zhp);
2018 return (1);
2019 }
2020
2021 /* The history must be logged as part of the export */
2022 log_history = B_FALSE;
2023
2024 ret = (zpool_destroy(zhp, history_str) != 0);
2025
2026 zpool_close(zhp);
2027
2028 return (ret);
2029 }
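/*
 * Illustrative sketch (not from the original source; "tank" is a
 * hypothetical pool name): the destroy path above reduces to the following
 * libzfs calls, in the same order they appear in zpool_do_destroy().
 *
 *     zpool_handle_t *zhp = zpool_open_canfail(g_zfs, "tank");
 *     if (zhp != NULL) {
 *             if (zpool_disable_datasets(zhp, B_FALSE) == 0)
 *                     (void) zpool_destroy(zhp, history_str);
 *             zpool_close(zhp);
 *     }
 */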
2030
2031 typedef struct export_cbdata {
2032 boolean_t force;
2033 boolean_t hardforce;
2034 } export_cbdata_t;
2035
2036 /*
2037 * Export one pool
2038 */
2039 static int
2040 zpool_export_one(zpool_handle_t *zhp, void *data)
2041 {
2042 export_cbdata_t *cb = data;
2043
2044 if (zpool_disable_datasets(zhp, cb->force) != 0)
2045 return (1);
2046
2047 /* The history must be logged as part of the export */
2048 log_history = B_FALSE;
2049
2050 if (cb->hardforce) {
2051 if (zpool_export_force(zhp, history_str) != 0)
2052 return (1);
2053 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
2054 return (1);
2055 }
2056
2057 return (0);
2058 }
2059
2060 /*
2061 * zpool export [-a] [-f] [-F] <pool> ...
2062 *
2063 * -a Export all pools
2064 * -f Forcefully unmount datasets (-F hard-forces the export itself)
2065 *
2066 * Export the given pools. By default, the command will attempt to cleanly
2067 * unmount any active datasets within the pool. If the '-f' flag is specified,
2068 * then the datasets will be forcefully unmounted.
2069 */
2070 int
2071 zpool_do_export(int argc, char **argv)
2072 {
2073 export_cbdata_t cb;
2074 boolean_t do_all = B_FALSE;
2075 boolean_t force = B_FALSE;
2076 boolean_t hardforce = B_FALSE;
2077 int c, ret;
2078
2079 /* check options */
2080 while ((c = getopt(argc, argv, "afF")) != -1) {
2081 switch (c) {
2082 case 'a':
2083 do_all = B_TRUE;
2084 break;
2085 case 'f':
2086 force = B_TRUE;
2087 break;
2088 case 'F':
2089 hardforce = B_TRUE;
2090 break;
2091 case '?':
2092 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
2093 optopt);
2094 usage(B_FALSE);
2095 }
2096 }
2097
2098 cb.force = force;
2099 cb.hardforce = hardforce;
2100 argc -= optind;
2101 argv += optind;
2102
2103 if (do_all) {
2104 if (argc != 0) {
2105 (void) fprintf(stderr, gettext("too many arguments\n"));
2106 usage(B_FALSE);
2107 }
2108
2109 return (for_each_pool(argc, argv, B_TRUE, NULL,
2110 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
2111 }
2112
2113 /* check arguments */
2114 if (argc < 1) {
2115 (void) fprintf(stderr, gettext("missing pool argument\n"));
2116 usage(B_FALSE);
2117 }
2118
2119 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
2120 B_FALSE, zpool_export_one, &cb);
2121
2122 return (ret);
2123 }
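/*
 * Illustrative sketch (hypothetical invocation): exporting every imported
 * pool with the hard-force behaviour mirrors the '-a' path above by filling
 * in the callback data and handing zpool_export_one() to for_each_pool().
 *
 *     export_cbdata_t cb = { .force = B_FALSE, .hardforce = B_TRUE };
 *     int err = for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL,
 *         B_FALSE, zpool_export_one, &cb);
 */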
2124
2125 /*
2126 * Given a vdev configuration, determine the maximum width needed for the device
2127 * name column.
2128 */
2129 static int
2130 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
2131 int name_flags)
2132 {
2133 static const char *const subtypes[] =
2134 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
2135
2136 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
2137 max = MAX(strlen(name) + depth, max);
2138 free(name);
2139
2140 nvlist_t **child;
2141 uint_t children;
2142 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2143 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2144 &child, &children) == 0)
2145 for (uint_t c = 0; c < children; ++c)
2146 max = MAX(max_width(zhp, child[c], depth + 2,
2147 max, name_flags), max);
2148
2149 return (max);
2150 }
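/*
 * Illustrative sketch (assumes a root vdev nvlist 'nvroot' and a pool named
 * "mypool"): show_import() below seeds the recursion with the pool name
 * length so the NAME column is never narrower than the pool name, then
 * clamps the result to a minimum of 10 columns.
 *
 *     int width = max_width(NULL, nvroot, 0, strlen("mypool"),
 *         VDEV_NAME_TYPE_ID);
 *     if (width < 10)
 *             width = 10;
 */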
2151
2152 typedef struct spare_cbdata {
2153 uint64_t cb_guid;
2154 zpool_handle_t *cb_zhp;
2155 } spare_cbdata_t;
2156
2157 static boolean_t
2158 find_vdev(nvlist_t *nv, uint64_t search)
2159 {
2160 uint64_t guid;
2161 nvlist_t **child;
2162 uint_t c, children;
2163
2164 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2165 search == guid)
2166 return (B_TRUE);
2167
2168 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2169 &child, &children) == 0) {
2170 for (c = 0; c < children; c++)
2171 if (find_vdev(child[c], search))
2172 return (B_TRUE);
2173 }
2174
2175 return (B_FALSE);
2176 }
2177
2178 static int
2179 find_spare(zpool_handle_t *zhp, void *data)
2180 {
2181 spare_cbdata_t *cbp = data;
2182 nvlist_t *config, *nvroot;
2183
2184 config = zpool_get_config(zhp, NULL);
2185 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2186 &nvroot) == 0);
2187
2188 if (find_vdev(nvroot, cbp->cb_guid)) {
2189 cbp->cb_zhp = zhp;
2190 return (1);
2191 }
2192
2193 zpool_close(zhp);
2194 return (0);
2195 }
2196
2197 typedef struct status_cbdata {
2198 int cb_count;
2199 int cb_name_flags;
2200 int cb_namewidth;
2201 boolean_t cb_allpools;
2202 boolean_t cb_verbose;
2203 boolean_t cb_literal;
2204 boolean_t cb_explain;
2205 boolean_t cb_first;
2206 boolean_t cb_dedup_stats;
2207 boolean_t cb_print_unhealthy;
2208 boolean_t cb_print_status;
2209 boolean_t cb_print_slow_ios;
2210 boolean_t cb_print_vdev_init;
2211 boolean_t cb_print_vdev_trim;
2212 vdev_cmd_data_list_t *vcdl;
2213 boolean_t cb_print_power;
2214 } status_cbdata_t;
2215
2216 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2217 static boolean_t
2218 is_blank_str(const char *str)
2219 {
2220 for (; str != NULL && *str != '\0'; ++str)
2221 if (!isblank(*str))
2222 return (B_FALSE);
2223 return (B_TRUE);
2224 }
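/*
 * Worked example (illustrative only): is_blank_str(NULL), is_blank_str("")
 * and is_blank_str(" \t") all yield B_TRUE, while is_blank_str(" x") yields
 * B_FALSE, so whitespace-only command output is shown as "-" below.
 */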
2225
2226 /* Print command output lines for a specific vdev in a specific pool */
2227 static void
2228 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
2229 {
2230 vdev_cmd_data_t *data;
2231 int i, j;
2232 const char *val;
2233
2234 for (i = 0; i < vcdl->count; i++) {
2235 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2236 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2237 /* Not the vdev we're looking for */
2238 continue;
2239 }
2240
2241 data = &vcdl->data[i];
2242 /* Print out all the output values for this vdev */
2243 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2244 val = NULL;
2245 /* Does this vdev have values for this column? */
2246 for (int k = 0; k < data->cols_cnt; k++) {
2247 if (strcmp(data->cols[k],
2248 vcdl->uniq_cols[j]) == 0) {
2249 /* yes it does, record the value */
2250 val = data->lines[k];
2251 break;
2252 }
2253 }
2254 /*
2255 * Mark empty values with dashes to make output
2256 * awk-able.
2257 */
2258 if (val == NULL || is_blank_str(val))
2259 val = "-";
2260
2261 printf("%*s", vcdl->uniq_cols_width[j], val);
2262 if (j < vcdl->uniq_cols_cnt - 1)
2263 fputs(" ", stdout);
2264 }
2265
2266 /* Print out any values that aren't in a column at the end */
2267 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2268 /* Did we have any columns? If so print a spacer. */
2269 if (vcdl->uniq_cols_cnt > 0)
2270 fputs(" ", stdout);
2271
2272 val = data->lines[j];
2273 fputs(val ?: "", stdout);
2274 }
2275 break;
2276 }
2277 }
2278
2279 /*
2280 * Print vdev initialization status for leaves
2281 */
2282 static void
2283 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2284 {
2285 if (verbose) {
2286 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2287 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2288 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2289 !vs->vs_scan_removing) {
2290 char zbuf[1024];
2291 char tbuf[256];
2292 struct tm zaction_ts;
2293
2294 time_t t = vs->vs_initialize_action_time;
2295 int initialize_pct = 100;
2296 if (vs->vs_initialize_state !=
2297 VDEV_INITIALIZE_COMPLETE) {
2298 initialize_pct = (vs->vs_initialize_bytes_done *
2299 100 / (vs->vs_initialize_bytes_est + 1));
2300 }
2301
2302 (void) localtime_r(&t, &zaction_ts);
2303 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2304
2305 switch (vs->vs_initialize_state) {
2306 case VDEV_INITIALIZE_SUSPENDED:
2307 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2308 gettext("suspended, started at"), tbuf);
2309 break;
2310 case VDEV_INITIALIZE_ACTIVE:
2311 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2312 gettext("started at"), tbuf);
2313 break;
2314 case VDEV_INITIALIZE_COMPLETE:
2315 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2316 gettext("completed at"), tbuf);
2317 break;
2318 }
2319
2320 (void) printf(gettext(" (%d%% initialized%s)"),
2321 initialize_pct, zbuf);
2322 } else {
2323 (void) printf(gettext(" (uninitialized)"));
2324 }
2325 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2326 (void) printf(gettext(" (initializing)"));
2327 }
2328 }
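/*
 * Worked example (hypothetical numbers): with 300 bytes done out of an
 * estimated 400, the expression above evaluates as
 *
 *     initialize_pct = 300 * 100 / (400 + 1) = 74  (integer division)
 *
 * and the "+ 1" merely guards against a division by zero when the estimate
 * is still 0.
 */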
2329
2330 /*
2331 * Print vdev TRIM status for leaves
2332 */
2333 static void
2334 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2335 {
2336 if (verbose) {
2337 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2338 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2339 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2340 !vs->vs_scan_removing) {
2341 char zbuf[1024];
2342 char tbuf[256];
2343 struct tm zaction_ts;
2344
2345 time_t t = vs->vs_trim_action_time;
2346 int trim_pct = 100;
2347 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2348 trim_pct = (vs->vs_trim_bytes_done *
2349 100 / (vs->vs_trim_bytes_est + 1));
2350 }
2351
2352 (void) localtime_r(&t, &zaction_ts);
2353 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2354
2355 switch (vs->vs_trim_state) {
2356 case VDEV_TRIM_SUSPENDED:
2357 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2358 gettext("suspended, started at"), tbuf);
2359 break;
2360 case VDEV_TRIM_ACTIVE:
2361 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2362 gettext("started at"), tbuf);
2363 break;
2364 case VDEV_TRIM_COMPLETE:
2365 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2366 gettext("completed at"), tbuf);
2367 break;
2368 }
2369
2370 (void) printf(gettext(" (%d%% trimmed%s)"),
2371 trim_pct, zbuf);
2372 } else if (vs->vs_trim_notsup) {
2373 (void) printf(gettext(" (trim unsupported)"));
2374 } else {
2375 (void) printf(gettext(" (untrimmed)"));
2376 }
2377 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2378 (void) printf(gettext(" (trimming)"));
2379 }
2380 }
2381
2382 /*
2383 * Return the color associated with a health string. This includes returning
2384 * NULL for no color change.
2385 */
2386 static const char *
2387 health_str_to_color(const char *health)
2388 {
2389 if (strcmp(health, gettext("FAULTED")) == 0 ||
2390 strcmp(health, gettext("SUSPENDED")) == 0 ||
2391 strcmp(health, gettext("UNAVAIL")) == 0) {
2392 return (ANSI_RED);
2393 }
2394
2395 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2396 strcmp(health, gettext("DEGRADED")) == 0 ||
2397 strcmp(health, gettext("REMOVED")) == 0) {
2398 return (ANSI_YELLOW);
2399 }
2400
2401 return (NULL);
2402 }
2403
2404 /*
2405 * Called for each leaf vdev. Returns 0 if the vdev is healthy.
2406 * A vdev is unhealthy if any of the following are true:
2407 * 1) there are read, write, or checksum errors,
2408 * 2) its state is not ONLINE, or
2409 * 3) slow IO reporting was requested (-s) and there are slow IOs.
2410 */
2411 static int
2412 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
2413 {
2414 status_cbdata_t *cb = data;
2415 vdev_stat_t *vs;
2416 uint_t vsc;
2417 (void) hdl_data;
2418
2419 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2420 (uint64_t **)&vs, &vsc) != 0)
2421 return (1);
2422
2423 if (vs->vs_checksum_errors || vs->vs_read_errors ||
2424 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
2425 return (1);
2426
2427 if (cb->cb_print_slow_ios && vs->vs_slow_ios)
2428 return (1);
2429
2430 return (0);
2431 }
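/*
 * Illustrative sketch (mirrors the pruning check in print_status_config()
 * below): a vdev subtree can be treated as healthy when no leaf callback
 * reports a problem.
 *
 *     boolean_t healthy = (for_each_vdev_in_nvlist(nv,
 *         vdev_health_check_cb, cb) == 0);
 */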
2432
2433 /*
2434 * Print out configuration state as requested by status_callback.
2435 */
2436 static void
2437 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2438 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2439 {
2440 nvlist_t **child, *root;
2441 uint_t c, i, vsc, children;
2442 pool_scan_stat_t *ps = NULL;
2443 vdev_stat_t *vs;
2444 char rbuf[6], wbuf[6], cbuf[6];
2445 char *vname;
2446 uint64_t notpresent;
2447 spare_cbdata_t spare_cb;
2448 const char *state;
2449 const char *type;
2450 const char *path = NULL;
2451 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
2452 *scolor = NULL;
2453
2454 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2455 &child, &children) != 0)
2456 children = 0;
2457
2458 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2459 (uint64_t **)&vs, &vsc) == 0);
2460
2461 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2462
2463 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2464 return;
2465
2466 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2467
2468 if (isspare) {
2469 /*
2470 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2471 * online drives.
2472 */
2473 if (vs->vs_aux == VDEV_AUX_SPARED)
2474 state = gettext("INUSE");
2475 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2476 state = gettext("AVAIL");
2477 }
2478
2479 /*
2480 * If '-e' is specified then top-level vdevs and their children
2481 * can be pruned if all of their leaves are healthy.
2482 */
2483 if (cb->cb_print_unhealthy && depth > 0 &&
2484 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
2485 return;
2486 }
2487
2488 printf_color(health_str_to_color(state),
2489 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2490 name, state);
2491
2492 if (!isspare) {
2493 if (vs->vs_read_errors)
2494 rcolor = ANSI_RED;
2495
2496 if (vs->vs_write_errors)
2497 wcolor = ANSI_RED;
2498
2499 if (vs->vs_checksum_errors)
2500 ccolor = ANSI_RED;
2501
2502 if (vs->vs_slow_ios)
2503 scolor = ANSI_BLUE;
2504
2505 if (cb->cb_literal) {
2506 fputc(' ', stdout);
2507 printf_color(rcolor, "%5llu",
2508 (u_longlong_t)vs->vs_read_errors);
2509 fputc(' ', stdout);
2510 printf_color(wcolor, "%5llu",
2511 (u_longlong_t)vs->vs_write_errors);
2512 fputc(' ', stdout);
2513 printf_color(ccolor, "%5llu",
2514 (u_longlong_t)vs->vs_checksum_errors);
2515 } else {
2516 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2517 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2518 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2519 sizeof (cbuf));
2520 fputc(' ', stdout);
2521 printf_color(rcolor, "%5s", rbuf);
2522 fputc(' ', stdout);
2523 printf_color(wcolor, "%5s", wbuf);
2524 fputc(' ', stdout);
2525 printf_color(ccolor, "%5s", cbuf);
2526 }
2527 if (cb->cb_print_slow_ios) {
2528 if (children == 0) {
2529 /* Only leaf vdevs have slow IOs */
2530 zfs_nicenum(vs->vs_slow_ios, rbuf,
2531 sizeof (rbuf));
2532 } else {
2533 snprintf(rbuf, sizeof (rbuf), "-");
2534 }
2535
2536 if (cb->cb_literal)
2537 printf_color(scolor, " %5llu",
2538 (u_longlong_t)vs->vs_slow_ios);
2539 else
2540 printf_color(scolor, " %5s", rbuf);
2541 }
2542 if (cb->cb_print_power) {
2543 if (children == 0) {
2544 /* Only leaf vdevs have physical slots */
2545 switch (zpool_power_current_state(zhp, (char *)
2546 fnvlist_lookup_string(nv,
2547 ZPOOL_CONFIG_PATH))) {
2548 case 0:
2549 printf_color(ANSI_RED, " %5s",
2550 gettext("off"));
2551 break;
2552 case 1:
2553 printf(" %5s", gettext("on"));
2554 break;
2555 default:
2556 printf(" %5s", "-");
2557 }
2558 } else {
2559 printf(" %5s", "-");
2560 }
2561 }
2562 }
2563
2564 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2565 &notpresent) == 0) {
2566 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2567 (void) printf(" %s %s", gettext("was"), path);
2568 } else if (vs->vs_aux != 0) {
2569 (void) printf(" ");
2570 color_start(ANSI_RED);
2571 switch (vs->vs_aux) {
2572 case VDEV_AUX_OPEN_FAILED:
2573 (void) printf(gettext("cannot open"));
2574 break;
2575
2576 case VDEV_AUX_BAD_GUID_SUM:
2577 (void) printf(gettext("missing device"));
2578 break;
2579
2580 case VDEV_AUX_NO_REPLICAS:
2581 (void) printf(gettext("insufficient replicas"));
2582 break;
2583
2584 case VDEV_AUX_VERSION_NEWER:
2585 (void) printf(gettext("newer version"));
2586 break;
2587
2588 case VDEV_AUX_UNSUP_FEAT:
2589 (void) printf(gettext("unsupported feature(s)"));
2590 break;
2591
2592 case VDEV_AUX_ASHIFT_TOO_BIG:
2593 (void) printf(gettext("unsupported minimum blocksize"));
2594 break;
2595
2596 case VDEV_AUX_SPARED:
2597 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2598 &spare_cb.cb_guid) == 0);
2599 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2600 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2601 zpool_get_name(zhp)) == 0)
2602 (void) printf(gettext("currently in "
2603 "use"));
2604 else
2605 (void) printf(gettext("in use by "
2606 "pool '%s'"),
2607 zpool_get_name(spare_cb.cb_zhp));
2608 zpool_close(spare_cb.cb_zhp);
2609 } else {
2610 (void) printf(gettext("currently in use"));
2611 }
2612 break;
2613
2614 case VDEV_AUX_ERR_EXCEEDED:
2615 if (vs->vs_read_errors + vs->vs_write_errors +
2616 vs->vs_checksum_errors == 0 && children == 0 &&
2617 vs->vs_slow_ios > 0) {
2618 (void) printf(gettext("too many slow I/Os"));
2619 } else {
2620 (void) printf(gettext("too many errors"));
2621 }
2622 break;
2623
2624 case VDEV_AUX_IO_FAILURE:
2625 (void) printf(gettext("experienced I/O failures"));
2626 break;
2627
2628 case VDEV_AUX_BAD_LOG:
2629 (void) printf(gettext("bad intent log"));
2630 break;
2631
2632 case VDEV_AUX_EXTERNAL:
2633 (void) printf(gettext("external device fault"));
2634 break;
2635
2636 case VDEV_AUX_SPLIT_POOL:
2637 (void) printf(gettext("split into new pool"));
2638 break;
2639
2640 case VDEV_AUX_ACTIVE:
2641 (void) printf(gettext("currently in use"));
2642 break;
2643
2644 case VDEV_AUX_CHILDREN_OFFLINE:
2645 (void) printf(gettext("all children offline"));
2646 break;
2647
2648 case VDEV_AUX_BAD_LABEL:
2649 (void) printf(gettext("invalid label"));
2650 break;
2651
2652 default:
2653 (void) printf(gettext("corrupted data"));
2654 break;
2655 }
2656 color_end();
2657 } else if (children == 0 && !isspare &&
2658 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2659 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2660 vs->vs_configured_ashift < vs->vs_physical_ashift) {
2661 (void) printf(
2662 gettext(" block size: %dB configured, %dB native"),
2663 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2664 }
2665
2666 if (vs->vs_scan_removing != 0) {
2667 (void) printf(gettext(" (removing)"));
2668 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
2669 (void) printf(gettext(" (non-allocating)"));
2670 }
2671
2672 /* The root vdev has the scrub/resilver stats */
2673 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2674 ZPOOL_CONFIG_VDEV_TREE);
2675 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2676 (uint64_t **)&ps, &c);
2677
2678 /*
2679 * If you force fault a drive that's resilvering, its scan stats can
2680 * get frozen in time, giving the false impression that it's
2681 * being resilvered. That's why we check the state to see if the vdev
2682 * is healthy before reporting "resilvering" or "repairing".
2683 */
2684 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
2685 vs->vs_state == VDEV_STATE_HEALTHY) {
2686 if (vs->vs_scan_processed != 0) {
2687 (void) printf(gettext(" (%s)"),
2688 (ps->pss_func == POOL_SCAN_RESILVER) ?
2689 "resilvering" : "repairing");
2690 } else if (vs->vs_resilver_deferred) {
2691 (void) printf(gettext(" (awaiting resilver)"));
2692 }
2693 }
2694
2695 /* The top-level vdevs have the rebuild stats */
2696 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2697 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
2698 if (vs->vs_rebuild_processed != 0) {
2699 (void) printf(gettext(" (resilvering)"));
2700 }
2701 }
2702
2703 if (cb->vcdl != NULL) {
2704 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2705 printf(" ");
2706 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2707 }
2708 }
2709
2710 /* Display vdev initialization and trim status for leaves. */
2711 if (children == 0) {
2712 print_status_initialize(vs, cb->cb_print_vdev_init);
2713 print_status_trim(vs, cb->cb_print_vdev_trim);
2714 }
2715
2716 (void) printf("\n");
2717
2718 for (c = 0; c < children; c++) {
2719 uint64_t islog = B_FALSE, ishole = B_FALSE;
2720
2721 /* Don't print logs or holes here */
2722 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2723 &islog);
2724 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2725 &ishole);
2726 if (islog || ishole)
2727 continue;
2728 /* Only print normal classes here */
2729 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2730 continue;
2731
2732 /* Provide vdev_rebuild_stats to children if available */
2733 if (vrs == NULL) {
2734 (void) nvlist_lookup_uint64_array(nv,
2735 ZPOOL_CONFIG_REBUILD_STATS,
2736 (uint64_t **)&vrs, &i);
2737 }
2738
2739 vname = zpool_vdev_name(g_zfs, zhp, child[c],
2740 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2741 print_status_config(zhp, cb, vname, child[c], depth + 2,
2742 isspare, vrs);
2743 free(vname);
2744 }
2745 }
2746
2747 /*
2748 * Print the configuration of an exported pool. Iterate over all vdevs in the
2749 * pool, printing out the name and status for each one.
2750 */
2751 static void
2752 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2753 int depth)
2754 {
2755 nvlist_t **child;
2756 uint_t c, children;
2757 vdev_stat_t *vs;
2758 const char *type;
2759 char *vname;
2760
2761 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2762 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2763 strcmp(type, VDEV_TYPE_HOLE) == 0)
2764 return;
2765
2766 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2767 (uint64_t **)&vs, &c) == 0);
2768
2769 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2770 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2771
2772 if (vs->vs_aux != 0) {
2773 (void) printf(" ");
2774
2775 switch (vs->vs_aux) {
2776 case VDEV_AUX_OPEN_FAILED:
2777 (void) printf(gettext("cannot open"));
2778 break;
2779
2780 case VDEV_AUX_BAD_GUID_SUM:
2781 (void) printf(gettext("missing device"));
2782 break;
2783
2784 case VDEV_AUX_NO_REPLICAS:
2785 (void) printf(gettext("insufficient replicas"));
2786 break;
2787
2788 case VDEV_AUX_VERSION_NEWER:
2789 (void) printf(gettext("newer version"));
2790 break;
2791
2792 case VDEV_AUX_UNSUP_FEAT:
2793 (void) printf(gettext("unsupported feature(s)"));
2794 break;
2795
2796 case VDEV_AUX_ERR_EXCEEDED:
2797 (void) printf(gettext("too many errors"));
2798 break;
2799
2800 case VDEV_AUX_ACTIVE:
2801 (void) printf(gettext("currently in use"));
2802 break;
2803
2804 case VDEV_AUX_CHILDREN_OFFLINE:
2805 (void) printf(gettext("all children offline"));
2806 break;
2807
2808 case VDEV_AUX_BAD_LABEL:
2809 (void) printf(gettext("invalid label"));
2810 break;
2811
2812 default:
2813 (void) printf(gettext("corrupted data"));
2814 break;
2815 }
2816 }
2817 (void) printf("\n");
2818
2819 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2820 &child, &children) != 0)
2821 return;
2822
2823 for (c = 0; c < children; c++) {
2824 uint64_t is_log = B_FALSE;
2825
2826 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2827 &is_log);
2828 if (is_log)
2829 continue;
2830 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2831 continue;
2832
2833 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2834 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2835 print_import_config(cb, vname, child[c], depth + 2);
2836 free(vname);
2837 }
2838
2839 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2840 &child, &children) == 0) {
2841 (void) printf(gettext("\tcache\n"));
2842 for (c = 0; c < children; c++) {
2843 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2844 cb->cb_name_flags);
2845 (void) printf("\t %s\n", vname);
2846 free(vname);
2847 }
2848 }
2849
2850 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2851 &child, &children) == 0) {
2852 (void) printf(gettext("\tspares\n"));
2853 for (c = 0; c < children; c++) {
2854 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2855 cb->cb_name_flags);
2856 (void) printf("\t %s\n", vname);
2857 free(vname);
2858 }
2859 }
2860 }
2861
2862 /*
2863 * Print specialized class vdevs.
2864 *
2865 * These are recorded as top-level vdevs in the main pool child array,
2866 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2867 * print_status_config() or print_import_config() to print the top-level
2868 * class vdevs; any of their children (e.g. mirrored slogs) are then
2869 * printed recursively, which works because only the top-level vdev is marked.
2870 */
2871 static void
2872 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2873 const char *class)
2874 {
2875 uint_t c, children;
2876 nvlist_t **child;
2877 boolean_t printed = B_FALSE;
2878
2879 assert(zhp != NULL || !cb->cb_verbose);
2880
2881 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2882 &children) != 0)
2883 return;
2884
2885 for (c = 0; c < children; c++) {
2886 uint64_t is_log = B_FALSE;
2887 const char *bias = NULL;
2888 const char *type = NULL;
2889
2890 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2891 &is_log);
2892
2893 if (is_log) {
2894 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
2895 } else {
2896 (void) nvlist_lookup_string(child[c],
2897 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2898 (void) nvlist_lookup_string(child[c],
2899 ZPOOL_CONFIG_TYPE, &type);
2900 }
2901
2902 if (bias == NULL || strcmp(bias, class) != 0)
2903 continue;
2904 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2905 continue;
2906
2907 if (!printed) {
2908 (void) printf("\t%s\t\n", gettext(class));
2909 printed = B_TRUE;
2910 }
2911
2912 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2913 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2914 if (cb->cb_print_status)
2915 print_status_config(zhp, cb, name, child[c], 2,
2916 B_FALSE, NULL);
2917 else
2918 print_import_config(cb, name, child[c], 2);
2919 free(name);
2920 }
2921 }
2922
2923 /*
2924 * Display the status for the given pool.
2925 */
2926 static int
2927 show_import(nvlist_t *config, boolean_t report_error)
2928 {
2929 uint64_t pool_state;
2930 vdev_stat_t *vs;
2931 const char *name;
2932 uint64_t guid;
2933 uint64_t hostid = 0;
2934 const char *msgid;
2935 const char *hostname = "unknown";
2936 nvlist_t *nvroot, *nvinfo;
2937 zpool_status_t reason;
2938 zpool_errata_t errata;
2939 const char *health;
2940 uint_t vsc;
2941 const char *comment;
2942 status_cbdata_t cb = { 0 };
2943
2944 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2945 &name) == 0);
2946 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2947 &guid) == 0);
2948 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2949 &pool_state) == 0);
2950 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2951 &nvroot) == 0);
2952
2953 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2954 (uint64_t **)&vs, &vsc) == 0);
2955 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2956
2957 reason = zpool_import_status(config, &msgid, &errata);
2958
2959 /*
2960 * If we're importing using a cachefile, then we won't report any
2961 * errors unless we are in the scan phase of the import.
2962 */
2963 if (reason != ZPOOL_STATUS_OK && !report_error)
2964 return (reason);
2965
2966 (void) printf(gettext(" pool: %s\n"), name);
2967 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
2968 (void) printf(gettext(" state: %s"), health);
2969 if (pool_state == POOL_STATE_DESTROYED)
2970 (void) printf(gettext(" (DESTROYED)"));
2971 (void) printf("\n");
2972
2973 switch (reason) {
2974 case ZPOOL_STATUS_MISSING_DEV_R:
2975 case ZPOOL_STATUS_MISSING_DEV_NR:
2976 case ZPOOL_STATUS_BAD_GUID_SUM:
2977 printf_color(ANSI_BOLD, gettext("status: "));
2978 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2979 "missing from the system.\n"));
2980 break;
2981
2982 case ZPOOL_STATUS_CORRUPT_LABEL_R:
2983 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2984 printf_color(ANSI_BOLD, gettext("status: "));
2985 printf_color(ANSI_YELLOW, gettext("One or more devices contains"
2986 " corrupted data.\n"));
2987 break;
2988
2989 case ZPOOL_STATUS_CORRUPT_DATA:
2990 (void) printf(
2991 gettext(" status: The pool data is corrupted.\n"));
2992 break;
2993
2994 case ZPOOL_STATUS_OFFLINE_DEV:
2995 printf_color(ANSI_BOLD, gettext("status: "));
2996 printf_color(ANSI_YELLOW, gettext("One or more devices "
2997 "are offlined.\n"));
2998 break;
2999
3000 case ZPOOL_STATUS_CORRUPT_POOL:
3001 printf_color(ANSI_BOLD, gettext("status: "));
3002 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
3003 "corrupted.\n"));
3004 break;
3005
3006 case ZPOOL_STATUS_VERSION_OLDER:
3007 printf_color(ANSI_BOLD, gettext("status: "));
3008 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3009 "a legacy on-disk version.\n"));
3010 break;
3011
3012 case ZPOOL_STATUS_VERSION_NEWER:
3013 printf_color(ANSI_BOLD, gettext("status: "));
3014 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
3015 "an incompatible version.\n"));
3016 break;
3017
3018 case ZPOOL_STATUS_FEAT_DISABLED:
3019 printf_color(ANSI_BOLD, gettext("status: "));
3020 printf_color(ANSI_YELLOW, gettext("Some supported "
3021 "features are not enabled on the pool.\n\t"
3022 "(Note that they may be intentionally disabled "
3023 "if the\n\t'compatibility' property is set.)\n"));
3024 break;
3025
3026 case ZPOOL_STATUS_COMPATIBILITY_ERR:
3027 printf_color(ANSI_BOLD, gettext("status: "));
3028 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
3029 "the file(s) indicated by the 'compatibility'\n"
3030 "property.\n"));
3031 break;
3032
3033 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
3034 printf_color(ANSI_BOLD, gettext("status: "));
3035 printf_color(ANSI_YELLOW, gettext("One or more features "
3036 "are enabled on the pool despite not being\n"
3037 "requested by the 'compatibility' property.\n"));
3038 break;
3039
3040 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3041 printf_color(ANSI_BOLD, gettext("status: "));
3042 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
3043 "feature(s) not supported on this system:\n"));
3044 color_start(ANSI_YELLOW);
3045 zpool_print_unsup_feat(config);
3046 color_end();
3047 break;
3048
3049 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3050 printf_color(ANSI_BOLD, gettext("status: "));
3051 printf_color(ANSI_YELLOW, gettext("The pool can only be "
3052 "accessed in read-only mode on this system. It\n\tcannot be"
3053 " accessed in read-write mode because it uses the "
3054 "following\n\tfeature(s) not supported on this system:\n"));
3055 color_start(ANSI_YELLOW);
3056 zpool_print_unsup_feat(config);
3057 color_end();
3058 break;
3059
3060 case ZPOOL_STATUS_HOSTID_ACTIVE:
3061 printf_color(ANSI_BOLD, gettext("status: "));
3062 printf_color(ANSI_YELLOW, gettext("The pool is currently "
3063 "imported by another system.\n"));
3064 break;
3065
3066 case ZPOOL_STATUS_HOSTID_REQUIRED:
3067 printf_color(ANSI_BOLD, gettext("status: "));
3068 printf_color(ANSI_YELLOW, gettext("The pool has the "
3069 "multihost property on. It cannot\n\tbe safely imported "
3070 "when the system hostid is not set.\n"));
3071 break;
3072
3073 case ZPOOL_STATUS_HOSTID_MISMATCH:
3074 printf_color(ANSI_BOLD, gettext("status: "));
3075 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
3076 "by another system.\n"));
3077 break;
3078
3079 case ZPOOL_STATUS_FAULTED_DEV_R:
3080 case ZPOOL_STATUS_FAULTED_DEV_NR:
3081 printf_color(ANSI_BOLD, gettext("status: "));
3082 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3083 "faulted.\n"));
3084 break;
3085
3086 case ZPOOL_STATUS_BAD_LOG:
3087 printf_color(ANSI_BOLD, gettext("status: "));
3088 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
3089 "be read.\n"));
3090 break;
3091
3092 case ZPOOL_STATUS_RESILVERING:
3093 case ZPOOL_STATUS_REBUILDING:
3094 printf_color(ANSI_BOLD, gettext("status: "));
3095 printf_color(ANSI_YELLOW, gettext("One or more devices were "
3096 "being resilvered.\n"));
3097 break;
3098
3099 case ZPOOL_STATUS_ERRATA:
3100 printf_color(ANSI_BOLD, gettext("status: "));
3101 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
3102 errata);
3103 break;
3104
3105 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
3106 printf_color(ANSI_BOLD, gettext("status: "));
3107 printf_color(ANSI_YELLOW, gettext("One or more devices are "
3108 "configured to use a non-native block size.\n"
3109 "\tExpect reduced performance.\n"));
3110 break;
3111
3112 default:
3113 /*
3114 * No other status can be seen when importing pools.
3115 */
3116 assert(reason == ZPOOL_STATUS_OK);
3117 }
3118
3119 /*
3120 * Print out an action according to the overall state of the pool.
3121 */
3122 if (vs->vs_state == VDEV_STATE_HEALTHY) {
3123 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
3124 reason == ZPOOL_STATUS_FEAT_DISABLED) {
3125 (void) printf(gettext(" action: The pool can be "
3126 "imported using its name or numeric identifier, "
3127 "though\n\tsome features will not be available "
3128 "without an explicit 'zpool upgrade'.\n"));
3129 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
3130 (void) printf(gettext(" action: The pool can be "
3131 "imported using its name or numeric\n\tidentifier, "
3132 "though the file(s) indicated by its "
3133 "'compatibility'\n\tproperty cannot be parsed at "
3134 "this time.\n"));
3135 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
3136 (void) printf(gettext(" action: The pool can be "
3137 "imported using its name or numeric "
3138 "identifier and\n\tthe '-f' flag.\n"));
3139 } else if (reason == ZPOOL_STATUS_ERRATA) {
3140 switch (errata) {
3141 case ZPOOL_ERRATA_NONE:
3142 break;
3143
3144 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
3145 (void) printf(gettext(" action: The pool can "
3146 "be imported using its name or numeric "
3147 "identifier,\n\thowever there is a compat"
3148 "ibility issue which should be corrected"
3149 "\n\tby running 'zpool scrub'\n"));
3150 break;
3151
3152 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
3153 (void) printf(gettext(" action: The pool can"
3154 "not be imported with this version of ZFS "
3155 "due to\n\tan active asynchronous destroy. "
3156 "Revert to an earlier version\n\tand "
3157 "allow the destroy to complete before "
3158 "updating.\n"));
3159 break;
3160
3161 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
3162 (void) printf(gettext(" action: Existing "
3163 "encrypted datasets contain an on-disk "
3164 "incompatibility, which\n\tneeds to be "
3165 "corrected. Backup these datasets to new "
3166 "encrypted datasets\n\tand destroy the "
3167 "old ones.\n"));
3168 break;
3169
3170 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
3171 (void) printf(gettext(" action: Existing "
3172 "encrypted snapshots and bookmarks contain "
3173 "an on-disk\n\tincompatibility. This may "
3174 "cause on-disk corruption if they are used"
3175 "\n\twith 'zfs recv'. To correct the "
3176 "issue, enable the bookmark_v2 feature.\n\t"
3177 "No additional action is needed if there "
3178 "are no encrypted snapshots or\n\t"
3179 "bookmarks. If preserving the encrypted "
3180 "snapshots and bookmarks is\n\trequired, "
3181 "use a non-raw send to backup and restore "
3182 "them. Alternately,\n\tthey may be removed"
3183 " to resolve the incompatibility.\n"));
3184 break;
3185 default:
3186 /*
3187 * All errata must contain an action message.
3188 */
3189 assert(0);
3190 }
3191 } else {
3192 (void) printf(gettext(" action: The pool can be "
3193 "imported using its name or numeric "
3194 "identifier.\n"));
3195 }
3196 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
3197 (void) printf(gettext(" action: The pool can be imported "
3198 "despite missing or damaged devices. The\n\tfault "
3199 "tolerance of the pool may be compromised if imported.\n"));
3200 } else {
3201 switch (reason) {
3202 case ZPOOL_STATUS_VERSION_NEWER:
3203 (void) printf(gettext(" action: The pool cannot be "
3204 "imported. Access the pool on a system running "
3205 "newer\n\tsoftware, or recreate the pool from "
3206 "backup.\n"));
3207 break;
3208 case ZPOOL_STATUS_UNSUP_FEAT_READ:
3209 printf_color(ANSI_BOLD, gettext("action: "));
3210 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3211 "imported. Access the pool on a system that "
3212 "supports\n\tthe required feature(s), or recreate "
3213 "the pool from backup.\n"));
3214 break;
3215 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3216 printf_color(ANSI_BOLD, gettext("action: "));
3217 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3218 "imported in read-write mode. Import the pool "
3219 "with\n"
3220 "\t\"-o readonly=on\", access the pool on a system "
3221 "that supports the\n\trequired feature(s), or "
3222 "recreate the pool from backup.\n"));
3223 break;
3224 case ZPOOL_STATUS_MISSING_DEV_R:
3225 case ZPOOL_STATUS_MISSING_DEV_NR:
3226 case ZPOOL_STATUS_BAD_GUID_SUM:
3227 (void) printf(gettext(" action: The pool cannot be "
3228 "imported. Attach the missing\n\tdevices and try "
3229 "again.\n"));
3230 break;
3231 case ZPOOL_STATUS_HOSTID_ACTIVE:
3232 VERIFY0(nvlist_lookup_nvlist(config,
3233 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3234
3235 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3236 hostname = fnvlist_lookup_string(nvinfo,
3237 ZPOOL_CONFIG_MMP_HOSTNAME);
3238
3239 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3240 hostid = fnvlist_lookup_uint64(nvinfo,
3241 ZPOOL_CONFIG_MMP_HOSTID);
3242
3243 (void) printf(gettext(" action: The pool must be "
3244 "exported from %s (hostid=%"PRIx64")\n\tbefore it "
3245 "can be safely imported.\n"), hostname, hostid);
3246 break;
3247 case ZPOOL_STATUS_HOSTID_REQUIRED:
3248 (void) printf(gettext(" action: Set a unique system "
3249 "hostid with the zgenhostid(8) command.\n"));
3250 break;
3251 default:
3252 (void) printf(gettext(" action: The pool cannot be "
3253 "imported due to damaged devices or data.\n"));
3254 }
3255 }
3256
3257 /* Print the comment attached to the pool. */
3258 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3259 (void) printf(gettext("comment: %s\n"), comment);
3260
3261 /*
3262 * If the state is "closed" or "can't open", and the aux state
3263 * is "corrupt data":
3264 */
3265 if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3266 (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3267 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3268 if (pool_state == POOL_STATE_DESTROYED)
3269 (void) printf(gettext("\tThe pool was destroyed, "
3270 "but can be imported using the '-Df' flags.\n"));
3271 else if (pool_state != POOL_STATE_EXPORTED)
3272 (void) printf(gettext("\tThe pool may be active on "
3273 "another system, but can be imported using\n\t"
3274 "the '-f' flag.\n"));
3275 }
3276
3277 if (msgid != NULL) {
3278 (void) printf(gettext(
3279 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3280 msgid);
3281 }
3282
3283 (void) printf(gettext(" config:\n\n"));
3284
3285 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3286 VDEV_NAME_TYPE_ID);
3287 if (cb.cb_namewidth < 10)
3288 cb.cb_namewidth = 10;
3289
3290 print_import_config(&cb, name, nvroot, 0);
3291
3292 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3293 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3294 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3295
3296 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3297 (void) printf(gettext("\n\tAdditional devices are known to "
3298 "be part of this pool, though their\n\texact "
3299 "configuration cannot be determined.\n"));
3300 }
3301 return (0);
3302 }
3303
3304 static boolean_t
3305 zfs_force_import_required(nvlist_t *config)
3306 {
3307 uint64_t state;
3308 uint64_t hostid = 0;
3309 nvlist_t *nvinfo;
3310
3311 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3312 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3313
3314 /*
3315 * The hostid on LOAD_INFO comes from the MOS label via
3316 * spa_tryimport(). If it's not there, then we're likely talking to an
3317 * older kernel, so use the top one, which will be from the label
3318 * discovered in zpool_find_import(), or if a cachefile is in use, the
3319 * local hostid.
3320 */
3321 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3322 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3323 &hostid);
3324
3325 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3326 return (B_TRUE);
3327
3328 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3329 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3330 ZPOOL_CONFIG_MMP_STATE);
3331
3332 if (mmp_state != MMP_STATE_INACTIVE)
3333 return (B_TRUE);
3334 }
3335
3336 return (B_FALSE);
3337 }
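/*
 * Illustrative sketch (mirrors the guard in do_import() below): the force
 * requirement is only enforced when the caller did not pass '-f', i.e. when
 * ZFS_IMPORT_ANY_HOST is absent from the import flags.
 *
 *     if (zfs_force_import_required(config) &&
 *         !(flags & ZFS_IMPORT_ANY_HOST)) {
 *             ...refuse the import and suggest 'zpool import -f'...
 *     }
 */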
3338
3339 /*
3340 * Perform the import for the given configuration. This passes the heavy
3341 * lifting off to zpool_import_props(), and then mounts the datasets contained
3342 * within the pool.
3343 */
3344 static int
3345 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3346 nvlist_t *props, int flags)
3347 {
3348 int ret = 0;
3349 int ms_status = 0;
3350 zpool_handle_t *zhp;
3351 const char *name;
3352 uint64_t version;
3353
3354 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3355 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3356
3357 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3358 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3359 "is formatted using an unsupported ZFS version\n"), name);
3360 return (1);
3361 } else if (zfs_force_import_required(config) &&
3362 !(flags & ZFS_IMPORT_ANY_HOST)) {
3363 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3364 nvlist_t *nvinfo;
3365
3366 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3367 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3368 mmp_state = fnvlist_lookup_uint64(nvinfo,
3369 ZPOOL_CONFIG_MMP_STATE);
3370
3371 if (mmp_state == MMP_STATE_ACTIVE) {
3372 const char *hostname = "<unknown>";
3373 uint64_t hostid = 0;
3374
3375 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3376 hostname = fnvlist_lookup_string(nvinfo,
3377 ZPOOL_CONFIG_MMP_HOSTNAME);
3378
3379 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3380 hostid = fnvlist_lookup_uint64(nvinfo,
3381 ZPOOL_CONFIG_MMP_HOSTID);
3382
3383 (void) fprintf(stderr, gettext("cannot import '%s': "
3384 "pool is imported on %s (hostid: "
3385 "0x%"PRIx64")\nExport the pool on the other "
3386 "system, then run 'zpool import'.\n"),
3387 name, hostname, hostid);
3388 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3389 (void) fprintf(stderr, gettext("Cannot import '%s': "
3390 "pool has the multihost property on and the\n"
3391 "system's hostid is not set. Set a unique hostid "
3392 "with the zgenhostid(8) command.\n"), name);
3393 } else {
3394 const char *hostname = "<unknown>";
3395 time_t timestamp = 0;
3396 uint64_t hostid = 0;
3397
3398 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
3399 hostname = fnvlist_lookup_string(nvinfo,
3400 ZPOOL_CONFIG_HOSTNAME);
3401 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3402 hostname = fnvlist_lookup_string(config,
3403 ZPOOL_CONFIG_HOSTNAME);
3404
3405 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3406 timestamp = fnvlist_lookup_uint64(config,
3407 ZPOOL_CONFIG_TIMESTAMP);
3408
3409 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
3410 hostid = fnvlist_lookup_uint64(nvinfo,
3411 ZPOOL_CONFIG_HOSTID);
3412 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3413 hostid = fnvlist_lookup_uint64(config,
3414 ZPOOL_CONFIG_HOSTID);
3415
3416 (void) fprintf(stderr, gettext("cannot import '%s': "
3417 "pool was previously in use from another system.\n"
3418 "Last accessed by %s (hostid=%"PRIx64") at %s"
3419 "The pool can be imported, use 'zpool import -f' "
3420 "to import the pool.\n"), name, hostname,
3421 hostid, ctime(&timestamp));
3422 }
3423
3424 return (1);
3425 }
3426
3427 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3428 return (1);
3429
3430 if (newname != NULL)
3431 name = newname;
3432
3433 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3434 return (1);
3435
3436 /*
3437 * Loading keys is best effort. We don't want to return immediately
3438 * if it fails but we do want to give the error to the caller.
3439 */
3440 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3441 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3442 ret = 1;
3443
3444 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3445 !(flags & ZFS_IMPORT_ONLY)) {
3446 ms_status = zpool_enable_datasets(zhp, mntopts, 0);
3447 if (ms_status == EZFS_SHAREFAILED) {
3448 (void) fprintf(stderr, gettext("Import was "
3449 "successful, but unable to share some datasets"));
3450 } else if (ms_status == EZFS_MOUNTFAILED) {
3451 (void) fprintf(stderr, gettext("Import was "
3452 "successful, but unable to mount some datasets"));
3453 }
3454 }
3455
3456 zpool_close(zhp);
3457 return (ret);
3458 }
3459
3460 static int
3461 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3462 char *orig_name, char *new_name,
3463 boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3464 importargs_t *import)
3465 {
3466 nvlist_t *config = NULL;
3467 nvlist_t *found_config = NULL;
3468 uint64_t pool_state;
3469
3470 /*
3471 * At this point we have a list of import candidate configs. Even if
3472 * we were searching by pool name or guid, we still need to
3473 * post-process the list to deal with pool state and possible
3474 * duplicate names.
3475 */
3476 int err = 0;
3477 nvpair_t *elem = NULL;
3478 boolean_t first = B_TRUE;
3479 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3480
3481 verify(nvpair_value_nvlist(elem, &config) == 0);
3482
3483 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3484 &pool_state) == 0);
3485 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3486 continue;
3487 if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3488 continue;
3489
3490 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3491 import->policy) == 0);
3492
3493 if (!pool_specified) {
3494 if (first)
3495 first = B_FALSE;
3496 else if (!do_all)
3497 (void) fputc('\n', stdout);
3498
3499 if (do_all) {
3500 err |= do_import(config, NULL, mntopts,
3501 props, flags);
3502 } else {
3503 /*
3504 * If we're importing from cachefile, then
3505 * we don't want to report errors until we
3506 * are in the scan phase of the import. If
3507 * we get an error, then we return that error
3508 * to invoke the scan phase.
3509 */
3510 if (import->cachefile && !import->scan)
3511 err = show_import(config, B_FALSE);
3512 else
3513 (void) show_import(config, B_TRUE);
3514 }
3515 } else if (import->poolname != NULL) {
3516 const char *name;
3517
3518 /*
3519 * We are searching for a pool based on name.
3520 */
3521 verify(nvlist_lookup_string(config,
3522 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3523
3524 if (strcmp(name, import->poolname) == 0) {
3525 if (found_config != NULL) {
3526 (void) fprintf(stderr, gettext(
3527 "cannot import '%s': more than "
3528 "one matching pool\n"),
3529 import->poolname);
3530 (void) fprintf(stderr, gettext(
3531 "import by numeric ID instead\n"));
3532 err = B_TRUE;
3533 }
3534 found_config = config;
3535 }
3536 } else {
3537 uint64_t guid;
3538
3539 /*
3540 * Search for a pool by guid.
3541 */
3542 verify(nvlist_lookup_uint64(config,
3543 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3544
3545 if (guid == import->guid)
3546 found_config = config;
3547 }
3548 }
3549
3550 /*
3551 * If we were searching for a specific pool, verify that we found a
3552 * pool, and then do the import.
3553 */
3554 if (pool_specified && err == 0) {
3555 if (found_config == NULL) {
3556 (void) fprintf(stderr, gettext("cannot import '%s': "
3557 "no such pool available\n"), orig_name);
3558 err = B_TRUE;
3559 } else {
3560 err |= do_import(found_config, new_name,
3561 mntopts, props, flags);
3562 }
3563 }
3564
3565 /*
3566 * If we were just looking for pools, report an error if none were
3567 * found.
3568 */
3569 if (!pool_specified && first)
3570 (void) fprintf(stderr,
3571 gettext("no pools available to import\n"));
3572 return (err);
3573 }
3574
3575 typedef struct target_exists_args {
3576 const char *poolname;
3577 uint64_t poolguid;
3578 } target_exists_args_t;
3579
3580 static int
3581 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3582 {
3583 target_exists_args_t *args = data;
3584 nvlist_t *config = zpool_get_config(zhp, NULL);
3585 int found = 0;
3586
3587 if (config == NULL)
3588 return (0);
3589
3590 if (args->poolname != NULL) {
3591 const char *pool_name;
3592
3593 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3594 &pool_name) == 0);
3595 if (strcmp(pool_name, args->poolname) == 0)
3596 found = 1;
3597 } else {
3598 uint64_t pool_guid;
3599
3600 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3601 &pool_guid) == 0);
3602 if (pool_guid == args->poolguid)
3603 found = 1;
3604 }
3605 zpool_close(zhp);
3606
3607 return (found);
3608 }
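/*
 * Illustrative sketch (mirrors zpool_do_import() below; "tank" is a
 * hypothetical pool name): checking whether an already-imported pool clashes
 * with the requested name or guid.
 *
 *     target_exists_args_t search = { "tank", 0 };
 *     boolean_t exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
 */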
3609 /*
3610 * zpool checkpoint <pool>
3611 * checkpoint --discard <pool>
3612 *
3613 * -d Discard the checkpoint from a checkpointed
3614 * --discard pool.
3615 *
3616 * -w Wait for discarding a checkpoint to complete.
3617 * --wait
3618 *
3619 * Checkpoints the specified pool by taking a "snapshot" of its
3620 * current state. A pool can only have one checkpoint at a time.
3621 */
3622 int
3623 zpool_do_checkpoint(int argc, char **argv)
3624 {
3625 boolean_t discard, wait;
3626 char *pool;
3627 zpool_handle_t *zhp;
3628 int c, err;
3629
3630 struct option long_options[] = {
3631 {"discard", no_argument, NULL, 'd'},
3632 {"wait", no_argument, NULL, 'w'},
3633 {0, 0, 0, 0}
3634 };
3635
3636 discard = B_FALSE;
3637 wait = B_FALSE;
3638 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3639 switch (c) {
3640 case 'd':
3641 discard = B_TRUE;
3642 break;
3643 case 'w':
3644 wait = B_TRUE;
3645 break;
3646 case '?':
3647 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3648 optopt);
3649 usage(B_FALSE);
3650 }
3651 }
3652
3653 if (wait && !discard) {
3654 (void) fprintf(stderr, gettext("--wait only valid when "
3655 "--discard also specified\n"));
3656 usage(B_FALSE);
3657 }
3658
3659 argc -= optind;
3660 argv += optind;
3661
3662 if (argc < 1) {
3663 (void) fprintf(stderr, gettext("missing pool argument\n"));
3664 usage(B_FALSE);
3665 }
3666
3667 if (argc > 1) {
3668 (void) fprintf(stderr, gettext("too many arguments\n"));
3669 usage(B_FALSE);
3670 }
3671
3672 pool = argv[0];
3673
3674 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3675 /* As a special case, check for use of '/' in the name */
3676 if (strchr(pool, '/') != NULL)
3677 (void) fprintf(stderr, gettext("'zpool checkpoint' "
3678 "doesn't work on datasets. To save the state "
3679 "of a dataset from a specific point in time "
3680 "please use 'zfs snapshot'\n"));
3681 return (1);
3682 }
3683
3684 if (discard) {
3685 err = (zpool_discard_checkpoint(zhp) != 0);
3686 if (err == 0 && wait)
3687 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3688 } else {
3689 err = (zpool_checkpoint(zhp) != 0);
3690 }
3691
3692 zpool_close(zhp);
3693
3694 return (err);
3695 }
3696
3697 #define CHECKPOINT_OPT 1024
3698
3699 /*
3700 * zpool import [-d dir] [-D]
3701 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3702 * [-d dir | -c cachefile | -s] [-f] -a
3703 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3704 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3705 * [newpool]
3706 *
3707 * -c Read pool information from a cachefile instead of searching
3708 * devices. If importing from a cachefile config fails, then
3709 * fallback to searching for devices only in the directories that
3710 * exist in the cachefile.
3711 *
3712 * -d Scan in a specific directory, other than /dev/. More than
3713 * one directory can be specified using multiple '-d' options.
3714 *
3715 * -D Scan for previously destroyed pools, or import all or only
3716 * the specified destroyed pools.
3717 *
3718 * -R Temporarily import the pool, with all mountpoints relative to
3719 * the given root. The pool will remain exported when the machine
3720 * is rebooted.
3721 *
3722 * -V Import even in the presence of faulted vdevs. This is an
3723 * intentionally undocumented option for testing purposes, and
3724 * treats the pool configuration as complete, leaving any bad
3725 * vdevs in the FAULTED state. In other words, it does a verbatim
3726 * import.
3727 *
3728 * -f Force import, even if it appears that the pool is active.
3729 *
3730 * -F Attempt rewind if necessary.
3731 *
3732 * -n See if rewind would work, but don't actually rewind.
3733 *
3734 * -N Import the pool but don't mount datasets.
3735 *
3736 * -T Specify a starting txg to use for import. This is an
3737 * intentionally undocumented option for testing purposes.
3738 *
3739 * -a Import all pools found.
3740 *
3741 * -l Load encryption keys while importing.
3742 *
3743 * -o Set property=value and/or temporary mount options (without '=').
3744 *
3745 * -s Scan using the default search path; the libblkid cache will
3746 * not be consulted.
3747 *
3748 * --rewind-to-checkpoint
3749 * Import the pool and revert to the checkpoint.
3750 *
3751 * The import command scans for pools to import, and imports pools based on pool
3752 * name and GUID. The pool can also be renamed as part of the import process.
3753 */
3754 int
3755 zpool_do_import(int argc, char **argv)
3756 {
3757 char **searchdirs = NULL;
3758 char *env, *envdup = NULL;
3759 int nsearch = 0;
3760 int c;
3761 int err = 0;
3762 nvlist_t *pools = NULL;
3763 boolean_t do_all = B_FALSE;
3764 boolean_t do_destroyed = B_FALSE;
3765 char *mntopts = NULL;
3766 uint64_t searchguid = 0;
3767 char *searchname = NULL;
3768 char *propval;
3769 nvlist_t *policy = NULL;
3770 nvlist_t *props = NULL;
3771 int flags = ZFS_IMPORT_NORMAL;
3772 uint32_t rewind_policy = ZPOOL_NO_REWIND;
3773 boolean_t dryrun = B_FALSE;
3774 boolean_t do_rewind = B_FALSE;
3775 boolean_t xtreme_rewind = B_FALSE;
3776 boolean_t do_scan = B_FALSE;
3777 boolean_t pool_exists = B_FALSE;
3778 boolean_t pool_specified = B_FALSE;
3779 uint64_t txg = -1ULL;
3780 char *cachefile = NULL;
3781 importargs_t idata = { 0 };
3782 char *endptr;
3783
3784 struct option long_options[] = {
3785 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3786 {0, 0, 0, 0}
3787 };
3788
3789 /* check options */
3790 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3791 long_options, NULL)) != -1) {
3792 switch (c) {
3793 case 'a':
3794 do_all = B_TRUE;
3795 break;
3796 case 'c':
3797 cachefile = optarg;
3798 break;
3799 case 'd':
3800 searchdirs = safe_realloc(searchdirs,
3801 (nsearch + 1) * sizeof (char *));
3802 searchdirs[nsearch++] = optarg;
3803 break;
3804 case 'D':
3805 do_destroyed = B_TRUE;
3806 break;
3807 case 'f':
3808 flags |= ZFS_IMPORT_ANY_HOST;
3809 break;
3810 case 'F':
3811 do_rewind = B_TRUE;
3812 break;
3813 case 'l':
3814 flags |= ZFS_IMPORT_LOAD_KEYS;
3815 break;
3816 case 'm':
3817 flags |= ZFS_IMPORT_MISSING_LOG;
3818 break;
3819 case 'n':
3820 dryrun = B_TRUE;
3821 break;
3822 case 'N':
3823 flags |= ZFS_IMPORT_ONLY;
3824 break;
3825 case 'o':
3826 if ((propval = strchr(optarg, '=')) != NULL) {
3827 *propval = '\0';
3828 propval++;
3829 if (add_prop_list(optarg, propval,
3830 &props, B_TRUE))
3831 goto error;
3832 } else {
3833 mntopts = optarg;
3834 }
3835 break;
3836 case 'R':
3837 if (add_prop_list(zpool_prop_to_name(
3838 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3839 goto error;
3840 if (add_prop_list_default(zpool_prop_to_name(
3841 ZPOOL_PROP_CACHEFILE), "none", &props))
3842 goto error;
3843 break;
3844 case 's':
3845 do_scan = B_TRUE;
3846 break;
3847 case 't':
3848 flags |= ZFS_IMPORT_TEMP_NAME;
3849 if (add_prop_list_default(zpool_prop_to_name(
3850 ZPOOL_PROP_CACHEFILE), "none", &props))
3851 goto error;
3852 break;
3853
3854 case 'T':
3855 errno = 0;
3856 txg = strtoull(optarg, &endptr, 0);
3857 if (errno != 0 || *endptr != '\0') {
3858 (void) fprintf(stderr,
3859 gettext("invalid txg value\n"));
3860 usage(B_FALSE);
3861 }
3862 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3863 break;
3864 case 'V':
3865 flags |= ZFS_IMPORT_VERBATIM;
3866 break;
3867 case 'X':
3868 xtreme_rewind = B_TRUE;
3869 break;
3870 case CHECKPOINT_OPT:
3871 flags |= ZFS_IMPORT_CHECKPOINT;
3872 break;
3873 case ':':
3874 (void) fprintf(stderr, gettext("missing argument for "
3875 "'%c' option\n"), optopt);
3876 usage(B_FALSE);
3877 break;
3878 case '?':
3879 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3880 optopt);
3881 usage(B_FALSE);
3882 }
3883 }
3884
3885 argc -= optind;
3886 argv += optind;
3887
3888 if (cachefile && nsearch != 0) {
3889 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3890 usage(B_FALSE);
3891 }
3892
3893 if (cachefile && do_scan) {
3894 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3895 usage(B_FALSE);
3896 }
3897
3898 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3899 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3900 usage(B_FALSE);
3901 }
3902
3903 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3904 (void) fprintf(stderr, gettext("-l is only meaningful during "
3905 "an import\n"));
3906 usage(B_FALSE);
3907 }
3908
3909 if ((dryrun || xtreme_rewind) && !do_rewind) {
3910 (void) fprintf(stderr,
3911 gettext("-n or -X only meaningful with -F\n"));
3912 usage(B_FALSE);
3913 }
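/*
 * Summary of how the -F/-n/-X combinations map onto the rewind policy set
 * below (descriptive note only):
 *
 *     -F          ZPOOL_DO_REWIND
 *     -F -n       ZPOOL_TRY_REWIND (report what a rewind would do)
 *     -F -X       ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND
 *     -F -n -X    ZPOOL_TRY_REWIND | ZPOOL_EXTREME_REWIND
 */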
3914 if (dryrun)
3915 rewind_policy = ZPOOL_TRY_REWIND;
3916 else if (do_rewind)
3917 rewind_policy = ZPOOL_DO_REWIND;
3918 if (xtreme_rewind)
3919 rewind_policy |= ZPOOL_EXTREME_REWIND;
3920
3921 /* In the future, we can capture further policy and include it here */
3922 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3923 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3924 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3925 rewind_policy) != 0)
3926 goto error;
3927
3928 /* check argument count */
3929 if (do_all) {
3930 if (argc != 0) {
3931 (void) fprintf(stderr, gettext("too many arguments\n"));
3932 usage(B_FALSE);
3933 }
3934 } else {
3935 if (argc > 2) {
3936 (void) fprintf(stderr, gettext("too many arguments\n"));
3937 usage(B_FALSE);
3938 }
3939 }
3940
3941 /*
3942 * Check for the effective uid. We do this explicitly here because
3943 * otherwise any attempt to discover pools will silently fail.
3944 */
3945 if (argc == 0 && geteuid() != 0) {
3946 (void) fprintf(stderr, gettext("cannot "
3947 "discover pools: permission denied\n"));
3948
3949 free(searchdirs);
3950 nvlist_free(props);
3951 nvlist_free(policy);
3952 return (1);
3953 }
3954
3955 /*
3956 * Depending on the arguments given, we do one of the following:
3957 *
3958 * <none> Iterate through all pools and display information about
3959 * each one.
3960 *
3961 * -a Iterate through all pools and try to import each one.
3962 *
3963 * <id> Find the pool that corresponds to the given GUID/pool
3964 * name and import that one.
3965 *
3966 * -D The above options apply only to destroyed pools.
3967 */
3968 if (argc != 0) {
3969 char *endptr;
3970
3971 errno = 0;
3972 searchguid = strtoull(argv[0], &endptr, 10);
3973 if (errno != 0 || *endptr != '\0') {
3974 searchname = argv[0];
3975 searchguid = 0;
3976 }
3977 pool_specified = B_TRUE;
3978
3979 /*
3980 * User specified a name or guid. Ensure it's unique.
3981 */
3982 target_exists_args_t search = {searchname, searchguid};
3983 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3984 }
3985
3986 /*
3987 * Check the environment for the preferred search path.
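 * ZPOOL_IMPORT_PATH is a colon-separated list of directories, e.g.
 * (illustrative) ZPOOL_IMPORT_PATH="/dev/disk/by-id:/dev/disk/by-path".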
3988 */
3989 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3990 char *dir, *tmp = NULL;
3991
3992 envdup = strdup(env);
3993
3994 for (dir = strtok_r(envdup, ":", &tmp);
3995 dir != NULL;
3996 dir = strtok_r(NULL, ":", &tmp)) {
3997 searchdirs = safe_realloc(searchdirs,
3998 (nsearch + 1) * sizeof (char *));
3999 searchdirs[nsearch++] = dir;
4000 }
4001 }
4002
4003 idata.path = searchdirs;
4004 idata.paths = nsearch;
4005 idata.poolname = searchname;
4006 idata.guid = searchguid;
4007 idata.cachefile = cachefile;
4008 idata.scan = do_scan;
4009 idata.policy = policy;
4010
4011 libpc_handle_t lpch = {
4012 .lpc_lib_handle = g_zfs,
4013 .lpc_ops = &libzfs_config_ops,
4014 .lpc_printerr = B_TRUE
4015 };
4016 pools = zpool_search_import(&lpch, &idata);
4017
4018 if (pools != NULL && pool_exists &&
4019 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
4020 (void) fprintf(stderr, gettext("cannot import '%s': "
4021 "a pool with that name already exists\n"),
4022 argv[0]);
4023 (void) fprintf(stderr, gettext("use the form '%s "
4024 "<pool | id> <newpool>' to give it a new name\n"),
4025 "zpool import");
4026 err = 1;
4027 } else if (pools == NULL && pool_exists) {
4028 (void) fprintf(stderr, gettext("cannot import '%s': "
4029 "a pool with that name is already created/imported,\n"),
4030 argv[0]);
4031 (void) fprintf(stderr, gettext("and no additional pools "
4032 "with that name were found\n"));
4033 err = 1;
4034 } else if (pools == NULL) {
4035 if (argc != 0) {
4036 (void) fprintf(stderr, gettext("cannot import '%s': "
4037 "no such pool available\n"), argv[0]);
4038 }
4039 err = 1;
4040 }
4041
4042 if (err == 1) {
4043 free(searchdirs);
4044 free(envdup);
4045 nvlist_free(policy);
4046 nvlist_free(pools);
4047 nvlist_free(props);
4048 return (1);
4049 }
4050
4051 err = import_pools(pools, props, mntopts, flags,
4052 argc >= 1 ? argv[0] : NULL,
4053 argc >= 2 ? argv[1] : NULL,
4054 do_destroyed, pool_specified, do_all, &idata);
4055
4056 /*
4057 * If we're using the cachefile and we failed to import, then
4058 * fall back to scanning the directory for pools that match
4059 * those in the cachefile.
4060 */
4061 if (err != 0 && cachefile != NULL) {
4062 (void) printf(gettext("cachefile import failed, retrying\n"));
4063
4064 /*
4065 * We use the scan flag to gather the directories that exist
4066 * in the cachefile. If we need to fall back to searching for
4067 * the pool config, we will only search devices in these
4068 * directories.
4069 */
4070 idata.scan = B_TRUE;
4071 nvlist_free(pools);
4072 pools = zpool_search_import(&lpch, &idata);
4073
4074 err = import_pools(pools, props, mntopts, flags,
4075 argc >= 1 ? argv[0] : NULL,
4076 argc >= 2 ? argv[1] : NULL,
4077 do_destroyed, pool_specified, do_all, &idata);
4078 }
4079
4080 error:
4081 nvlist_free(props);
4082 nvlist_free(pools);
4083 nvlist_free(policy);
4084 free(searchdirs);
4085 free(envdup);
4086
4087 return (err ? 1 : 0);
4088 }
4089
4090 /*
4091 * zpool sync [-f] [pool] ...
4092 *
4093 * -f (undocumented) force uberblock (and config including zpool cache file)
4094 * update.
4095 *
4096 * Sync the specified pool(s).
4097 * Without arguments "zpool sync" will sync all pools.
4098 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
4099 *
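 * Examples (illustrative):
 *     zpool sync              # sync every imported pool
 *     zpool sync tank data    # sync only the pools 'tank' and 'data'
 *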
4100 */
4101 static int
4102 zpool_do_sync(int argc, char **argv)
4103 {
4104 int ret;
4105 boolean_t force = B_FALSE;
4106
4107 /* check options */
4108 while ((ret = getopt(argc, argv, "f")) != -1) {
4109 switch (ret) {
4110 case 'f':
4111 force = B_TRUE;
4112 break;
4113 case '?':
4114 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
4115 optopt);
4116 usage(B_FALSE);
4117 }
4118 }
4119
4120 argc -= optind;
4121 argv += optind;
4122
4123 /* if argc == 0 we will execute zpool_sync_one on all pools */
4124 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
4125 B_FALSE, zpool_sync_one, &force);
4126
4127 return (ret);
4128 }
4129
4130 typedef struct iostat_cbdata {
4131 uint64_t cb_flags;
4132 int cb_namewidth;
4133 int cb_iteration;
4134 boolean_t cb_verbose;
4135 boolean_t cb_literal;
4136 boolean_t cb_scripted;
4137 zpool_list_t *cb_list;
4138 vdev_cmd_data_list_t *vcdl;
4139 vdev_cbdata_t cb_vdevs;
4140 } iostat_cbdata_t;
4141
4142 /* iostat labels */
4143 typedef struct name_and_columns {
4144 const char *name; /* Column name */
4145 unsigned int columns; /* Center name to this number of columns */
4146 } name_and_columns_t;
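
/*
 * For example (illustrative), the IOS_DEFAULT entry {"capacity", 2} asks
 * print_iostat_labels() to center "capacity" over two data columns, which
 * line up with the bottom-row labels "alloc" and "free".
 */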
4147
4148 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
4149
4150 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
4151 {
4152 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
4153 {NULL}},
4154 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4155 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
4156 {NULL}},
4157 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
4158 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
4159 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
4160 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
4161 {"asyncq_wait", 2}, {NULL}},
4162 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
4163 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
4164 {"trim", 2}, {"rebuild", 2}, {NULL}},
4165 };
4166
4167 /* Shorthand - if "columns" field not set, default to 1 column */
4168 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
4169 {
4170 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
4171 {"write"}, {NULL}},
4172 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4173 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
4174 {NULL}},
4175 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
4176 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
4177 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
4178 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
4179 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
4180 {NULL}},
4181 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4182 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
4183 {"ind"}, {"agg"}, {NULL}},
4184 };
4185
4186 static const char *histo_to_title[] = {
4187 [IOS_L_HISTO] = "latency",
4188 [IOS_RQ_HISTO] = "req_size",
4189 };
4190
4191 /*
4192 * Return the number of labels in a null-terminated name_and_columns_t
4193 * array.
4194 *
4195 */
4196 static unsigned int
4197 label_array_len(const name_and_columns_t *labels)
4198 {
4199 int i = 0;
4200
4201 while (labels[i].name)
4202 i++;
4203
4204 return (i);
4205 }
4206
4207 /*
4208 * Return the number of strings in a null-terminated string array.
4209 * For example:
4210 *
4211 * const char *foo[] = {"bar", "baz", NULL};
4212 *
4213 * returns 2
4214 */
4215 static uint64_t
4216 str_array_len(const char *array[])
4217 {
4218 uint64_t i = 0;
4219 while (array[i])
4220 i++;
4221
4222 return (i);
4223 }
4224
4225
4226 /*
4227 * Return a default column width for default/latency/queue columns. This does
4228 * not include histograms, which have their columns autosized.
4229 */
4230 static unsigned int
4231 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
4232 {
4233 unsigned long column_width = 5; /* Normal niceprint */
4234 static unsigned long widths[] = {
4235 /*
4236 * Choose some sane default column sizes for printing the
4237 * raw numbers.
4238 */
4239 [IOS_DEFAULT] = 15, /* 1PB capacity */
4240 [IOS_LATENCY] = 10, /* 1B ns = 10sec */
4241 [IOS_QUEUES] = 6, /* 1M queue entries */
4242 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */
4243 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4244 };
4245
4246 if (cb->cb_literal)
4247 column_width = widths[type];
4248
4249 return (column_width);
4250 }
4251
4252 /*
4253 * Print the column labels, i.e:
4254 *
4255 * capacity operations bandwidth
4256 * alloc free read write read write ...
4257 *
4258 * If force_column_width is set, use it for the column width. If not set, use
4259 * the default column width.
4260 */
4261 static void
4262 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4263 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4264 {
4265 int i, idx, s;
4266 int text_start, rw_column_width, spaces_to_end;
4267 uint64_t flags = cb->cb_flags;
4268 uint64_t f;
4269 unsigned int column_width = force_column_width;
4270
4271 /* For each bit set in flags */
4272 for (f = flags; f; f &= ~(1ULL << idx)) {
4273 idx = lowbit64(f) - 1;
4274 if (!force_column_width)
4275 column_width = default_column_width(cb, idx);
4276 /* Print our top labels centered over "read write" label. */
4277 for (i = 0; i < label_array_len(labels[idx]); i++) {
4278 const char *name = labels[idx][i].name;
4279 /*
4280 * We treat labels[][].columns == 0 as shorthand
4281 * for one column. It makes writing out the label
4282 * tables more concise.
4283 */
4284 unsigned int columns = MAX(1, labels[idx][i].columns);
4285 unsigned int slen = strlen(name);
4286
4287 rw_column_width = (column_width * columns) +
4288 (2 * (columns - 1));
4289
4290 text_start = (int)((rw_column_width) / columns -
4291 slen / columns);
4292 if (text_start < 0)
4293 text_start = 0;
4294
4295 printf("  "); /* Two spaces between columns */
4296
4297 /* Space from beginning of column to label */
4298 for (s = 0; s < text_start; s++)
4299 printf(" ");
4300
4301 printf("%s", name);
4302
4303 /* Print space after label to end of column */
4304 spaces_to_end = rw_column_width - text_start - slen;
4305 if (spaces_to_end < 0)
4306 spaces_to_end = 0;
4307
4308 for (s = 0; s < spaces_to_end; s++)
4309 printf(" ");
4310 }
4311 }
4312 }
4313
4314
4315 /*
4316 * print_cmd_columns - Print custom column titles from -c
4317 *
4318 * If the user specified the "zpool status|iostat -c" then print their custom
4319 * column titles in the header. For example, print_cmd_columns() would print
4320 * the " col1 col2" part of this:
4321 *
4322 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4323 * ...
4324 * capacity operations bandwidth
4325 * pool alloc free read write read write col1 col2
4326 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4327 * mypool 269K 1008M 0 0 107 946
4328 * mirror 269K 1008M 0 0 107 946
4329 * sdb - - 0 0 102 473 val1 val2
4330 * sdc - - 0 0 5 473 val1 val2
4331 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4332 */
4333 static void
4334 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4335 {
4336 int i, j;
4337 vdev_cmd_data_t *data = &vcdl->data[0];
4338
4339 if (vcdl->count == 0 || data == NULL)
4340 return;
4341
4342 /*
4343 * Each vdev cmd should have the same column names unless the user did
4344 * something weird with their cmd. Just take the column names from the
4345 * first vdev and assume it works for all of them.
4346 */
4347 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4348 printf(" ");
4349 if (use_dashes) {
4350 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4351 printf("-");
4352 } else {
4353 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4354 vcdl->uniq_cols[i]);
4355 }
4356 }
4357 }
4358
4359
4360 /*
4361 * Utility function to print out a line of dashes like:
4362 *
4363 * -------------------------------- ----- ----- ----- ----- -----
4364 *
4365 * ...or a dashed named-row line like:
4366 *
4367 * logs - - - - -
4368 *
4369 * @cb: iostat data
4370 *
4371 * @force_column_width If non-zero, use the value as the column width.
4372 * Otherwise use the default column widths.
4373 *
4374 * @name: Print a dashed named-row line starting
4375 * with @name. Otherwise, print a regular
4376 * dashed line.
4377 */
4378 static void
4379 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4380 const char *name)
4381 {
4382 int i;
4383 unsigned int namewidth;
4384 uint64_t flags = cb->cb_flags;
4385 uint64_t f;
4386 int idx;
4387 const name_and_columns_t *labels;
4388 const char *title;
4389
4390
4391 if (cb->cb_flags & IOS_ANYHISTO_M) {
4392 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4393 } else if (cb->cb_vdevs.cb_names_count) {
4394 title = "vdev";
4395 } else {
4396 title = "pool";
4397 }
4398
4399 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4400 name ? strlen(name) : 0);
4401
4402
4403 if (name) {
4404 printf("%-*s", namewidth, name);
4405 } else {
4406 for (i = 0; i < namewidth; i++)
4407 (void) printf("-");
4408 }
4409
4410 /* For each bit in flags */
4411 for (f = flags; f; f &= ~(1ULL << idx)) {
4412 unsigned int column_width;
4413 idx = lowbit64(f) - 1;
4414 if (force_column_width)
4415 column_width = force_column_width;
4416 else
4417 column_width = default_column_width(cb, idx);
4418
4419 labels = iostat_bottom_labels[idx];
4420 for (i = 0; i < label_array_len(labels); i++) {
4421 if (name)
4422 printf(" %*s-", column_width - 1, " ");
4423 else
4424 printf(" %.*s", column_width,
4425 "--------------------");
4426 }
4427 }
4428 }
4429
4430
4431 static void
4432 print_iostat_separator_impl(iostat_cbdata_t *cb,
4433 unsigned int force_column_width)
4434 {
4435 print_iostat_dashes(cb, force_column_width, NULL);
4436 }
4437
4438 static void
4439 print_iostat_separator(iostat_cbdata_t *cb)
4440 {
4441 print_iostat_separator_impl(cb, 0);
4442 }
4443
4444 static void
4445 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4446 const char *histo_vdev_name)
4447 {
4448 unsigned int namewidth;
4449 const char *title;
4450
4451 color_start(ANSI_BOLD);
4452
4453 if (cb->cb_flags & IOS_ANYHISTO_M) {
4454 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4455 } else if (cb->cb_vdevs.cb_names_count) {
4456 title = "vdev";
4457 } else {
4458 title = "pool";
4459 }
4460
4461 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4462 histo_vdev_name ? strlen(histo_vdev_name) : 0);
4463
4464 if (histo_vdev_name)
4465 printf("%-*s", namewidth, histo_vdev_name);
4466 else
4467 printf("%*s", namewidth, "");
4468
4469
4470 print_iostat_labels(cb, force_column_width, iostat_top_labels);
4471 printf("\n");
4472
4473 printf("%-*s", namewidth, title);
4474
4475 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4476 if (cb->vcdl != NULL)
4477 print_cmd_columns(cb->vcdl, 0);
4478
4479 printf("\n");
4480
4481 print_iostat_separator_impl(cb, force_column_width);
4482
4483 if (cb->vcdl != NULL)
4484 print_cmd_columns(cb->vcdl, 1);
4485
4486 color_end();
4487
4488 printf("\n");
4489 }
4490
4491 static void
4492 print_iostat_header(iostat_cbdata_t *cb)
4493 {
4494 print_iostat_header_impl(cb, 0, NULL);
4495 }
4496
4497 /*
4498 * Prints a size string (i.e. 120M) with the suffix ("M") colored
4499 * by order of magnitude. Uses column_size to add padding.
4500 */
4501 static void
4502 print_stat_color(const char *statbuf, unsigned int column_size)
4503 {
4504 fputs(" ", stdout);
4505 size_t len = strlen(statbuf);
4506 while (len < column_size) {
4507 fputc(' ', stdout);
4508 column_size--;
4509 }
4510 if (*statbuf == '0') {
4511 color_start(ANSI_GRAY);
4512 fputc('0', stdout);
4513 } else {
4514 for (; *statbuf; statbuf++) {
4515 if (*statbuf == 'K') color_start(ANSI_GREEN);
4516 else if (*statbuf == 'M') color_start(ANSI_YELLOW);
4517 else if (*statbuf == 'G') color_start(ANSI_RED);
4518 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
4519 else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
4520 else if (*statbuf == 'E') color_start(ANSI_CYAN);
4521 fputc(*statbuf, stdout);
4522 if (--column_size <= 0)
4523 break;
4524 }
4525 }
4526 color_end();
4527 }
4528
4529 /*
4530 * Display a single statistic.
4531 */
4532 static void
4533 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4534 unsigned int column_size, boolean_t scripted)
4535 {
4536 char buf[64];
4537
4538 zfs_nicenum_format(value, buf, sizeof (buf), format);
4539
4540 if (scripted)
4541 printf("\t%s", buf);
4542 else
4543 print_stat_color(buf, column_size);
4544 }
4545
4546 /*
4547 * Calculate the default vdev stats
4548 *
4549 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
4550 * stats into calcvs.
4551 */
4552 static void
4553 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4554 vdev_stat_t *calcvs)
4555 {
4556 int i;
4557
4558 memcpy(calcvs, newvs, sizeof (*calcvs));
4559 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4560 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4561
4562 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4563 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4564 }
4565
4566 /*
4567 * Internal representation of the extended iostats data.
4568 *
4569 * The extended iostat stats are exported in nvlists as either uint64_t arrays
4570 * or single uint64_t's. We make both look like arrays to make them easier
4571 * to process. In order to make single uint64_t's look like arrays, we set
4572 * __data to the stat data, and then set *data = &__data with count = 1. Then,
4573 * we can just use *data and count.
4574 */
4575 struct stat_array {
4576 uint64_t *data;
4577 uint_t count; /* Number of entries in data[] */
4578 uint64_t __data; /* Only used when data is a single uint64_t */
4579 };
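
/*
 * Illustrative sketch of the single-value case described above: given a
 * plain uint64_t stat, nvpair64_to_stat_array() fills the struct roughly as
 *
 *     sa->__data = value;
 *     sa->data = &sa->__data;
 *     sa->count = 1;
 *
 * so callers can always iterate sa->data[0 .. sa->count - 1].
 */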
4580
4581 static uint64_t
4582 stat_histo_max(struct stat_array *nva, unsigned int len)
4583 {
4584 uint64_t max = 0;
4585 int i;
4586 for (i = 0; i < len; i++)
4587 max = MAX(max, array64_max(nva[i].data, nva[i].count));
4588
4589 return (max);
4590 }
4591
4592 /*
4593 * Helper function to look up a uint64_t array or uint64_t value and store its
4594 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
4595 * it look like a one element array to make it easier to process.
4596 */
4597 static int
4598 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4599 struct stat_array *nva)
4600 {
4601 nvpair_t *tmp;
4602 int ret;
4603
4604 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4605 switch (nvpair_type(tmp)) {
4606 case DATA_TYPE_UINT64_ARRAY:
4607 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4608 break;
4609 case DATA_TYPE_UINT64:
4610 ret = nvpair_value_uint64(tmp, &nva->__data);
4611 nva->data = &nva->__data;
4612 nva->count = 1;
4613 break;
4614 default:
4615 /* Not a uint64_t */
4616 ret = EINVAL;
4617 break;
4618 }
4619
4620 return (ret);
4621 }
4622
4623 /*
4624 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4625 * subtract them, and return the results in a newly allocated stat_array.
4626 * You must free the returned array with free_calc_stats() after you are
4627 * done with it.
4628 *
4629 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4630 * values.
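 *
 * Usage sketch (illustrative; "consume" is a hypothetical caller-side step,
 * see print_iostat_latency() for a real caller):
 *
 *     struct stat_array *nva;
 *     nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
 *     for (i = 0; i < len; i++)
 *             consume(nva[i].data, nva[i].count);
 *     free_calc_stats(nva, len);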
4631 */
4632 static struct stat_array *
4633 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4634 nvlist_t *newnv)
4635 {
4636 nvlist_t *oldnvx = NULL, *newnvx;
4637 struct stat_array *oldnva, *newnva, *calcnva;
4638 int i, j;
4639 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4640
4641 /* Extract our extended stats nvlist from the main list */
4642 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4643 &newnvx) == 0);
4644 if (oldnv) {
4645 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4646 &oldnvx) == 0);
4647 }
4648
4649 newnva = safe_malloc(alloc_size);
4650 oldnva = safe_malloc(alloc_size);
4651 calcnva = safe_malloc(alloc_size);
4652
4653 for (j = 0; j < len; j++) {
4654 verify(nvpair64_to_stat_array(newnvx, names[j],
4655 &newnva[j]) == 0);
4656 calcnva[j].count = newnva[j].count;
4657 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4658 calcnva[j].data = safe_malloc(alloc_size);
4659 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4660
4661 if (oldnvx) {
4662 verify(nvpair64_to_stat_array(oldnvx, names[j],
4663 &oldnva[j]) == 0);
4664 for (i = 0; i < oldnva[j].count; i++)
4665 calcnva[j].data[i] -= oldnva[j].data[i];
4666 }
4667 }
4668 free(newnva);
4669 free(oldnva);
4670 return (calcnva);
4671 }
4672
4673 static void
4674 free_calc_stats(struct stat_array *nva, unsigned int len)
4675 {
4676 int i;
4677 for (i = 0; i < len; i++)
4678 free(nva[i].data);
4679
4680 free(nva);
4681 }
4682
4683 static void
4684 print_iostat_histo(struct stat_array *nva, unsigned int len,
4685 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4686 double scale)
4687 {
4688 int i, j;
4689 char buf[6];
4690 uint64_t val;
4691 enum zfs_nicenum_format format;
4692 unsigned int buckets;
4693 unsigned int start_bucket;
4694
4695 if (cb->cb_literal)
4696 format = ZFS_NICENUM_RAW;
4697 else
4698 format = ZFS_NICENUM_1024;
4699
4700 /* All these histos are the same size, so just use nva[0].count */
4701 buckets = nva[0].count;
4702
4703 if (cb->cb_flags & IOS_RQ_HISTO_M) {
4704 /* Start at 512 - req size should never be lower than this */
4705 start_bucket = 9;
4706 } else {
4707 start_bucket = 0;
4708 }
4709
4710 for (j = start_bucket; j < buckets; j++) {
4711 /* Print histogram bucket label */
4712 if (cb->cb_flags & IOS_L_HISTO_M) {
4713 /* Ending range of this bucket */
4714 val = (1UL << (j + 1)) - 1;
4715 zfs_nicetime(val, buf, sizeof (buf));
4716 } else {
4717 /* Request size (starting range of bucket) */
4718 val = (1UL << j);
4719 zfs_nicenum(val, buf, sizeof (buf));
4720 }
4721
4722 if (cb->cb_scripted)
4723 printf("%llu", (u_longlong_t)val);
4724 else
4725 printf("%-*s", namewidth, buf);
4726
4727 /* Print the values on the line */
4728 for (i = 0; i < len; i++) {
4729 print_one_stat(nva[i].data[j] * scale, format,
4730 column_width, cb->cb_scripted);
4731 }
4732 printf("\n");
4733 }
4734 }
4735
4736 static void
4737 print_solid_separator(unsigned int length)
4738 {
4739 while (length--)
4740 printf("-");
4741 printf("\n");
4742 }
4743
4744 static void
4745 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4746 nvlist_t *newnv, double scale, const char *name)
4747 {
4748 unsigned int column_width;
4749 unsigned int namewidth;
4750 unsigned int entire_width;
4751 enum iostat_type type;
4752 struct stat_array *nva;
4753 const char **names;
4754 unsigned int names_len;
4755
4756 /* What type of histo are we? */
4757 type = IOS_HISTO_IDX(cb->cb_flags);
4758
4759 /* Get NULL-terminated array of nvlist names for our histo */
4760 names = vsx_type_to_nvlist[type];
4761 names_len = str_array_len(names); /* num of names */
4762
4763 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4764
4765 if (cb->cb_literal) {
4766 column_width = MAX(5,
4767 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4768 } else {
4769 column_width = 5;
4770 }
4771
4772 namewidth = MAX(cb->cb_namewidth,
4773 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4774
4775 /*
4776 * Calculate the entire line width of what we're printing. The
4777 * +2 is for the two spaces between columns:
4778 */
4779 /* read write */
4780 /* ----- ----- */
4781 /* |___| <---------- column_width */
4782 /* */
4783 /* |__________| <--- entire_width */
4784 /* */
4785 entire_width = namewidth + (column_width + 2) *
4786 label_array_len(iostat_bottom_labels[type]);
4787
4788 if (cb->cb_scripted)
4789 printf("%s\n", name);
4790 else
4791 print_iostat_header_impl(cb, column_width, name);
4792
4793 print_iostat_histo(nva, names_len, cb, column_width,
4794 namewidth, scale);
4795
4796 free_calc_stats(nva, names_len);
4797 if (!cb->cb_scripted)
4798 print_solid_separator(entire_width);
4799 }
4800
4801 /*
4802 * Calculate the average latency of a power-of-two latency histogram
4803 */
4804 static uint64_t
4805 single_histo_average(uint64_t *histo, unsigned int buckets)
4806 {
4807 int i;
4808 uint64_t count = 0, total = 0;
4809
4810 for (i = 0; i < buckets; i++) {
4811 /*
4812 * Our buckets are power-of-two latency ranges. Use the
4813 * midpoint latency of each bucket to calculate the average.
4814 * For example:
4815 *
4816 * Bucket Midpoint
4817 * 8ns-15ns: 12ns
4818 * 16ns-31ns: 24ns
4819 * ...
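 *
 * In general the midpoint of bucket i is (1 << i) + (1 << i) / 2, i.e.
 * 1.5 * 2^i, so the value returned below is effectively
 *
 *     sum(histo[i] * 1.5 * 2^i) / sum(histo[i])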
4820 */
4821 if (histo[i] != 0) {
4822 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4823 count += histo[i];
4824 }
4825 }
4826
4827 /* Prevent divide by zero */
4828 return (count == 0 ? 0 : total / count);
4829 }
4830
4831 static void
4832 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4833 {
4834 const char *names[] = {
4835 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4836 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4837 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4838 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4839 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4840 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4841 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4842 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4843 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4844 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4845 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4846 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4847 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4848 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4849 };
4850
4851 struct stat_array *nva;
4852
4853 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4854 enum zfs_nicenum_format format;
4855
4856 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4857
4858 if (cb->cb_literal)
4859 format = ZFS_NICENUM_RAW;
4860 else
4861 format = ZFS_NICENUM_1024;
4862
4863 for (int i = 0; i < ARRAY_SIZE(names); i++) {
4864 uint64_t val = nva[i].data[0];
4865 print_one_stat(val, format, column_width, cb->cb_scripted);
4866 }
4867
4868 free_calc_stats(nva, ARRAY_SIZE(names));
4869 }
4870
4871 static void
4872 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4873 nvlist_t *newnv)
4874 {
4875 int i;
4876 uint64_t val;
4877 const char *names[] = {
4878 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4879 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4880 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4881 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4882 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4883 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4884 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4885 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4886 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4887 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4888 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4889 };
4890 struct stat_array *nva;
4891
4892 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4893 enum zfs_nicenum_format format;
4894
4895 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4896
4897 if (cb->cb_literal)
4898 format = ZFS_NICENUM_RAWTIME;
4899 else
4900 format = ZFS_NICENUM_TIME;
4901
4902 /* Print our avg latencies on the line */
4903 for (i = 0; i < ARRAY_SIZE(names); i++) {
4904 /* Compute average latency for a latency histo */
4905 val = single_histo_average(nva[i].data, nva[i].count);
4906 print_one_stat(val, format, column_width, cb->cb_scripted);
4907 }
4908 free_calc_stats(nva, ARRAY_SIZE(names));
4909 }
4910
4911 /*
4912 * Print default statistics (capacity/operations/bandwidth)
4913 */
4914 static void
4915 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4916 {
4917 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4918 enum zfs_nicenum_format format;
4919 char na; /* char to print for "not applicable" values */
4920
4921 if (cb->cb_literal) {
4922 format = ZFS_NICENUM_RAW;
4923 na = '0';
4924 } else {
4925 format = ZFS_NICENUM_1024;
4926 na = '-';
4927 }
4928
4929 /* only toplevel vdevs have capacity stats */
4930 if (vs->vs_space == 0) {
4931 if (cb->cb_scripted)
4932 printf("\t%c\t%c", na, na);
4933 else
4934 printf(" %*c %*c", column_width, na, column_width,
4935 na);
4936 } else {
4937 print_one_stat(vs->vs_alloc, format, column_width,
4938 cb->cb_scripted);
4939 print_one_stat(vs->vs_space - vs->vs_alloc, format,
4940 column_width, cb->cb_scripted);
4941 }
4942
4943 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4944 format, column_width, cb->cb_scripted);
4945 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4946 format, column_width, cb->cb_scripted);
4947 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4948 format, column_width, cb->cb_scripted);
4949 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4950 format, column_width, cb->cb_scripted);
4951 }
4952
4953 static const char *const class_name[] = {
4954 VDEV_ALLOC_BIAS_DEDUP,
4955 VDEV_ALLOC_BIAS_SPECIAL,
4956 VDEV_ALLOC_CLASS_LOGS
4957 };
4958
4959 /*
4960 * Print out all the statistics for the given vdev. This can either be the
4961 * toplevel configuration, or called recursively. If 'name' is NULL, then this
4962 * is a verbose output, and we don't want to display the toplevel pool stats.
4963 *
4964 * Returns the number of stat lines printed.
4965 */
4966 static unsigned int
4967 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4968 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4969 {
4970 nvlist_t **oldchild, **newchild;
4971 uint_t c, children, oldchildren;
4972 vdev_stat_t *oldvs, *newvs, *calcvs;
4973 vdev_stat_t zerovs = { 0 };
4974 char *vname;
4975 int i;
4976 int ret = 0;
4977 uint64_t tdelta;
4978 double scale;
4979
4980 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4981 return (ret);
4982
4983 calcvs = safe_malloc(sizeof (*calcvs));
4984
4985 if (oldnv != NULL) {
4986 verify(nvlist_lookup_uint64_array(oldnv,
4987 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4988 } else {
4989 oldvs = &zerovs;
4990 }
4991
4992 /* Do we only want to see a specific vdev? */
4993 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4994 /* Yes we do. Is this the vdev? */
4995 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4996 /*
4997 * This is our vdev. Since it is the only vdev we
4998 * will be displaying, make depth = 0 so that it
4999 * doesn't get indented.
5000 */
5001 depth = 0;
5002 break;
5003 }
5004 }
5005
5006 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
5007 /* Couldn't match the name */
5008 goto children;
5009 }
5010
5011
5012 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
5013 (uint64_t **)&newvs, &c) == 0);
5014
5015 /*
5016 * Print the vdev name unless it's a histogram. Histograms
5017 * display the vdev name in the header itself.
5018 */
5019 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
5020 if (cb->cb_scripted) {
5021 printf("%s", name);
5022 } else {
5023 if (strlen(name) + depth > cb->cb_namewidth)
5024 (void) printf("%*s%s", depth, "", name);
5025 else
5026 (void) printf("%*s%s%*s", depth, "", name,
5027 (int)(cb->cb_namewidth - strlen(name) -
5028 depth), "");
5029 }
5030 }
5031
5032 /* Calculate our scaling factor */
5033 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
5034 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
5035 /*
5036 * If we specify printing histograms with no time interval, then
5037 * print the histogram numbers over the entire lifetime of the
5038 * vdev.
5039 */
5040 scale = 1;
5041 } else {
5042 if (tdelta == 0)
5043 scale = 1.0;
5044 else
5045 scale = (double)NANOSEC / tdelta;
5046 }
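/*
 * Illustrative note: vs_timestamp is in nanoseconds, so for a 2 second
 * sampling interval tdelta is roughly 2 * NANOSEC and scale is ~0.5,
 * turning the raw deltas printed below into per-second rates.
 */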
5047
5048 if (cb->cb_flags & IOS_DEFAULT_M) {
5049 calc_default_iostats(oldvs, newvs, calcvs);
5050 print_iostat_default(calcvs, cb, scale);
5051 }
5052 if (cb->cb_flags & IOS_LATENCY_M)
5053 print_iostat_latency(cb, oldnv, newnv);
5054 if (cb->cb_flags & IOS_QUEUES_M)
5055 print_iostat_queues(cb, newnv);
5056 if (cb->cb_flags & IOS_ANYHISTO_M) {
5057 printf("\n");
5058 print_iostat_histos(cb, oldnv, newnv, scale, name);
5059 }
5060
5061 if (cb->vcdl != NULL) {
5062 const char *path;
5063 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
5064 &path) == 0) {
5065 printf(" ");
5066 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
5067 }
5068 }
5069
5070 if (!(cb->cb_flags & IOS_ANYHISTO_M))
5071 printf("\n");
5072
5073 ret++;
5074
5075 children:
5076
5077 free(calcvs);
5078
5079 if (!cb->cb_verbose)
5080 return (ret);
5081
5082 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
5083 &newchild, &children) != 0)
5084 return (ret);
5085
5086 if (oldnv) {
5087 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
5088 &oldchild, &oldchildren) != 0)
5089 return (ret);
5090
5091 children = MIN(oldchildren, children);
5092 }
5093
5094 /*
5095 * print normal top-level devices
5096 */
5097 for (c = 0; c < children; c++) {
5098 uint64_t ishole = B_FALSE, islog = B_FALSE;
5099
5100 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
5101 &ishole);
5102
5103 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
5104 &islog);
5105
5106 if (ishole || islog)
5107 continue;
5108
5109 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
5110 continue;
5111
5112 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5113 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5114 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
5115 newchild[c], cb, depth + 2);
5116 free(vname);
5117 }
5118
5119 /*
5120 * print all other top-level devices
5121 */
5122 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
5123 boolean_t printed = B_FALSE;
5124
5125 for (c = 0; c < children; c++) {
5126 uint64_t islog = B_FALSE;
5127 const char *bias = NULL;
5128 const char *type = NULL;
5129
5130 (void) nvlist_lookup_uint64(newchild[c],
5131 ZPOOL_CONFIG_IS_LOG, &islog);
5132 if (islog) {
5133 bias = VDEV_ALLOC_CLASS_LOGS;
5134 } else {
5135 (void) nvlist_lookup_string(newchild[c],
5136 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
5137 (void) nvlist_lookup_string(newchild[c],
5138 ZPOOL_CONFIG_TYPE, &type);
5139 }
5140 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
5141 continue;
5142 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
5143 continue;
5144
5145 if (!printed) {
5146 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
5147 !cb->cb_scripted &&
5148 !cb->cb_vdevs.cb_names) {
5149 print_iostat_dashes(cb, 0,
5150 class_name[n]);
5151 }
5152 printf("\n");
5153 printed = B_TRUE;
5154 }
5155
5156 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5157 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
5158 ret += print_vdev_stats(zhp, vname, oldnv ?
5159 oldchild[c] : NULL, newchild[c], cb, depth + 2);
5160 free(vname);
5161 }
5162 }
5163
5164 /*
5165 * Include level 2 ARC devices in iostat output
5166 */
5167 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
5168 &newchild, &children) != 0)
5169 return (ret);
5170
5171 if (oldnv) {
5172 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
5173 &oldchild, &oldchildren) != 0)
5174 return (ret);
5175
5176 children = MIN(oldchildren, children);
5177 }
5178
5179 if (children > 0) {
5180 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
5181 !cb->cb_vdevs.cb_names) {
5182 print_iostat_dashes(cb, 0, "cache");
5183 }
5184 printf("\n");
5185
5186 for (c = 0; c < children; c++) {
5187 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
5188 cb->cb_vdevs.cb_name_flags);
5189 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
5190 : NULL, newchild[c], cb, depth + 2);
5191 free(vname);
5192 }
5193 }
5194
5195 return (ret);
5196 }
5197
5198 static int
5199 refresh_iostat(zpool_handle_t *zhp, void *data)
5200 {
5201 iostat_cbdata_t *cb = data;
5202 boolean_t missing;
5203
5204 /*
5205 * If the pool has disappeared, remove it from the list and continue.
5206 */
5207 if (zpool_refresh_stats(zhp, &missing) != 0)
5208 return (-1);
5209
5210 if (missing)
5211 pool_list_remove(cb->cb_list, zhp);
5212
5213 return (0);
5214 }
5215
5216 /*
5217 * Callback to print out the iostats for the given pool.
5218 */
5219 static int
5220 print_iostat(zpool_handle_t *zhp, void *data)
5221 {
5222 iostat_cbdata_t *cb = data;
5223 nvlist_t *oldconfig, *newconfig;
5224 nvlist_t *oldnvroot, *newnvroot;
5225 int ret;
5226
5227 newconfig = zpool_get_config(zhp, &oldconfig);
5228
5229 if (cb->cb_iteration == 1)
5230 oldconfig = NULL;
5231
5232 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
5233 &newnvroot) == 0);
5234
5235 if (oldconfig == NULL)
5236 oldnvroot = NULL;
5237 else
5238 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
5239 &oldnvroot) == 0);
5240
5241 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
5242 cb, 0);
5243 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
5244 !cb->cb_scripted && cb->cb_verbose &&
5245 !cb->cb_vdevs.cb_names_count) {
5246 print_iostat_separator(cb);
5247 if (cb->vcdl != NULL) {
5248 print_cmd_columns(cb->vcdl, 1);
5249 }
5250 printf("\n");
5251 }
5252
5253 return (ret);
5254 }
5255
5256 static int
5257 get_columns(void)
5258 {
5259 struct winsize ws;
5260 int columns = 80;
5261 int error;
5262
5263 if (isatty(STDOUT_FILENO)) {
5264 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
5265 if (error == 0)
5266 columns = ws.ws_col;
5267 } else {
5268 columns = 999;
5269 }
5270
5271 return (columns);
5272 }
5273
5274 /*
5275 * Return the required length of the pool/vdev name column. The minimum
5276 * allowed width and output formatting flags must be provided.
5277 */
5278 static int
5279 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5280 {
5281 nvlist_t *config, *nvroot;
5282 int width = min_width;
5283
5284 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5285 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5286 &nvroot) == 0);
5287 size_t poolname_len = strlen(zpool_get_name(zhp));
5288 if (verbose == B_FALSE) {
5289 width = MAX(poolname_len, min_width);
5290 } else {
5291 width = MAX(poolname_len,
5292 max_width(zhp, nvroot, 0, min_width, flags));
5293 }
5294 }
5295
5296 return (width);
5297 }
5298
5299 /*
5300 * Parse the argument list and extract the 'interval' and 'count' values, if present.
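 *
 * For example (illustrative), "zpool iostat tank 5 10" yields interval = 5
 * and count = 10, while "zpool iostat tank 5" yields interval = 5 and
 * count = 0 (i.e. repeat forever).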
5301 */
5302 static void
5303 get_interval_count(int *argcp, char **argv, float *iv,
5304 unsigned long *cnt)
5305 {
5306 float interval = 0;
5307 unsigned long count = 0;
5308 int argc = *argcp;
5309
5310 /*
5311 * Determine if the last argument is an integer or a pool name
5312 */
5313 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5314 char *end;
5315
5316 errno = 0;
5317 interval = strtof(argv[argc - 1], &end);
5318
5319 if (*end == '\0' && errno == 0) {
5320 if (interval == 0) {
5321 (void) fprintf(stderr, gettext(
5322 "interval cannot be zero\n"));
5323 usage(B_FALSE);
5324 }
5325 /*
5326 * Ignore the last parameter
5327 */
5328 argc--;
5329 } else {
5330 /*
5331 * If this is not a valid number, just plow on. The
5332 * user will get a more informative error message later
5333 * on.
5334 */
5335 interval = 0;
5336 }
5337 }
5338
5339 /*
5340 * If the last argument is also an integer, then we have both a count
5341 * and an interval.
5342 */
5343 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5344 char *end;
5345
5346 errno = 0;
5347 count = interval;
5348 interval = strtof(argv[argc - 1], &end);
5349
5350 if (*end == '\0' && errno == 0) {
5351 if (interval == 0) {
5352 (void) fprintf(stderr, gettext(
5353 "interval cannot be zero\n"));
5354 usage(B_FALSE);
5355 }
5356
5357 /*
5358 * Ignore the last parameter
5359 */
5360 argc--;
5361 } else {
5362 interval = 0;
5363 }
5364 }
5365
5366 *iv = interval;
5367 *cnt = count;
5368 *argcp = argc;
5369 }
5370
5371 static void
5372 get_timestamp_arg(char c)
5373 {
5374 if (c == 'u')
5375 timestamp_fmt = UDATE;
5376 else if (c == 'd')
5377 timestamp_fmt = DDATE;
5378 else
5379 usage(B_FALSE);
5380 }
5381
5382 /*
5383 * Return stat flags that are supported by all pools by both the module and
5384 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5385 * It will get ANDed down until only the flags that are supported on all pools
5386 * remain.
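 *
 * For example (illustrative), if one pool's module reports latency stats
 * (IOS_LATENCY_M) but another pool's module predates them, the shared mask
 * is ANDed down to the flags both support, e.g. just IOS_DEFAULT_M.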
5387 */
5388 static int
5389 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5390 {
5391 uint64_t *mask = data;
5392 nvlist_t *config, *nvroot, *nvx;
5393 uint64_t flags = 0;
5394 int i, j;
5395
5396 config = zpool_get_config(zhp, NULL);
5397 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5398 &nvroot) == 0);
5399
5400 /* Default stats are always supported, but for completeness.. */
5401 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5402 flags |= IOS_DEFAULT_M;
5403
5404 /* Get our extended stats nvlist from the main list */
5405 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5406 &nvx) != 0) {
5407 /*
5408 * No extended stats; they're probably running an older
5409 * module. No big deal, we support that too.
5410 */
5411 goto end;
5412 }
5413
5414 /* For each extended stat, make sure all its nvpairs are supported */
5415 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5416 if (!vsx_type_to_nvlist[j][0])
5417 continue;
5418
5419 /* Start off by assuming the flag is supported, then check */
5420 flags |= (1ULL << j);
5421 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5422 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5423 /* flag isn't supported */
5424 flags = flags & ~(1ULL << j);
5425 break;
5426 }
5427 }
5428 }
5429 end:
5430 *mask = *mask & flags;
5431 return (0);
5432 }
5433
5434 /*
5435 * Return a bitmask of stats that are supported on all pools by both the module
5436 * and zpool iostat.
5437 */
5438 static uint64_t
5439 get_stat_flags(zpool_list_t *list)
5440 {
5441 uint64_t mask = -1;
5442
5443 /*
5444 * get_stat_flags_cb() will lop off bits from "mask" until only the
5445 * flags that are supported on all pools remain.
5446 */
5447 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5448 return (mask);
5449 }
5450
5451 /*
5452 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5453 */
5454 static int
5455 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5456 {
5457 uint64_t guid;
5458 vdev_cbdata_t *cb = cb_data;
5459 zpool_handle_t *zhp = zhp_data;
5460
5461 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
5462 return (0);
5463
5464 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
5465 }
5466
5467 /*
5468 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5469 */
5470 static int
5471 is_vdev(zpool_handle_t *zhp, void *cb_data)
5472 {
5473 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5474 }
5475
5476 /*
5477 * Check if vdevs are in a pool
5478 *
5479 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5480 * return 0. If pool_name is NULL, then search all pools.
5481 */
5482 static int
5483 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5484 vdev_cbdata_t *cb)
5485 {
5486 char **tmp_name;
5487 int ret = 0;
5488 int i;
5489 int pool_count = 0;
5490
5491 if ((argc == 0) || !*argv)
5492 return (0);
5493
5494 if (pool_name)
5495 pool_count = 1;
5496
5497 /* Temporarily hijack cb_names for a second... */
5498 tmp_name = cb->cb_names;
5499
5500 /* Go through our list of prospective vdev names */
5501 for (i = 0; i < argc; i++) {
5502 cb->cb_names = argv + i;
5503
5504 /* Is this name a vdev in our pools? */
5505 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5506 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5507 if (!ret) {
5508 /* No match */
5509 break;
5510 }
5511 }
5512
5513 cb->cb_names = tmp_name;
5514
5515 return (ret);
5516 }
5517
5518 static int
5519 is_pool_cb(zpool_handle_t *zhp, void *data)
5520 {
5521 char *name = data;
5522 if (strcmp(name, zpool_get_name(zhp)) == 0)
5523 return (1);
5524
5525 return (0);
5526 }
5527
5528 /*
5529 * Do we have a pool named *name? If so, return 1, otherwise 0.
5530 */
5531 static int
5532 is_pool(char *name)
5533 {
5534 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5535 is_pool_cb, name));
5536 }
5537
5538 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
5539 static int
5540 are_all_pools(int argc, char **argv)
5541 {
5542 if ((argc == 0) || !*argv)
5543 return (0);
5544
5545 while (--argc >= 0)
5546 if (!is_pool(argv[argc]))
5547 return (0);
5548
5549 return (1);
5550 }
5551
5552 /*
5553 * Helper function to print out vdev/pool names we can't resolve. Used for an
5554 * error message.
5555 */
5556 static void
5557 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5558 vdev_cbdata_t *cb)
5559 {
5560 int i;
5561 char *name;
5562 char *str;
5563 for (i = 0; i < argc; i++) {
5564 name = argv[i];
5565
5566 if (is_pool(name))
5567 str = gettext("pool");
5568 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5569 str = gettext("vdev in this pool");
5570 else if (are_vdevs_in_pool(1, &name, NULL, cb))
5571 str = gettext("vdev in another pool");
5572 else
5573 str = gettext("unknown");
5574
5575 fprintf(stderr, "\t%s (%s)\n", name, str);
5576 }
5577 }
5578
5579 /*
5580 * Same as get_interval_count(), but with additional checks to not misinterpret
5581 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
5582 * cb.cb_vdevs.cb_name_flags.
5583 */
5584 static void
5585 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5586 unsigned long *count, iostat_cbdata_t *cb)
5587 {
5588 char **tmpargv = argv;
5589 int argc_for_interval = 0;
5590
5591 /* Is the last arg an interval value? Or a guid? */
5592 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5593 &cb->cb_vdevs)) {
5594 /*
5595 * The last arg is not a guid, so it's probably an
5596 * interval value.
5597 */
5598 argc_for_interval++;
5599
5600 if (*argc >= 2 &&
5601 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5602 &cb->cb_vdevs)) {
5603 /*
5604 * The 2nd to last arg is not a guid, so it's probably
5605 * an interval value.
5606 */
5607 argc_for_interval++;
5608 }
5609 }
5610
5611 /* Point to our list of possible intervals */
5612 tmpargv = &argv[*argc - argc_for_interval];
5613
5614 *argc = *argc - argc_for_interval;
5615 get_interval_count(&argc_for_interval, tmpargv,
5616 interval, count);
5617 }
5618
5619 /*
5620 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5621 * if we were unable to determine its size.
5622 */
5623 static int
5624 terminal_height(void)
5625 {
5626 struct winsize win;
5627
5628 if (isatty(STDOUT_FILENO) == 0)
5629 return (-1);
5630
5631 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5632 return (win.ws_row);
5633
5634 return (-1);
5635 }
5636
5637 /*
5638 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5639 * print the result.
5640 *
5641 * name: Short name of the script ('iostat').
5642 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');
5643 */
5644 static void
5645 print_zpool_script_help(char *name, char *path)
5646 {
5647 char *argv[] = {path, (char *)"-h", NULL};
5648 char **lines = NULL;
5649 int lines_cnt = 0;
5650 int rc;
5651
5652 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5653 &lines_cnt);
5654 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5655 if (lines != NULL)
5656 libzfs_free_str_array(lines, lines_cnt);
5657 return;
5658 }
5659
5660 for (int i = 0; i < lines_cnt; i++)
5661 if (!is_blank_str(lines[i]))
5662 printf(" %-14s %s\n", name, lines[i]);
5663
5664 libzfs_free_str_array(lines, lines_cnt);
5665 }
5666
5667 /*
5668 * Go through the zpool status/iostat -c scripts in the user's path, run their
5669 * help option (-h), and print out the results.
5670 */
5671 static void
5672 print_zpool_dir_scripts(char *dirpath)
5673 {
5674 DIR *dir;
5675 struct dirent *ent;
5676 char fullpath[MAXPATHLEN];
5677 struct stat dir_stat;
5678
5679 if ((dir = opendir(dirpath)) != NULL) {
5680 /* print all the files and directories within the directory */
5681 while ((ent = readdir(dir)) != NULL) {
5682 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
5683 dirpath, ent->d_name) >= sizeof (fullpath)) {
5684 (void) fprintf(stderr,
5685 gettext("internal error: "
5686 "ZPOOL_SCRIPTS_PATH too large.\n"));
5687 exit(1);
5688 }
5689
5690 /* Print the scripts */
5691 if (stat(fullpath, &dir_stat) == 0)
5692 if (dir_stat.st_mode & S_IXUSR &&
5693 S_ISREG(dir_stat.st_mode))
5694 print_zpool_script_help(ent->d_name,
5695 fullpath);
5696 }
5697 closedir(dir);
5698 }
5699 }
5700
5701 /*
5702 * Print out help text for all zpool status/iostat -c scripts.
5703 */
5704 static void
5705 print_zpool_script_list(const char *subcommand)
5706 {
5707 char *dir, *sp, *tmp;
5708
5709 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5710
5711 sp = zpool_get_cmd_search_path();
5712 if (sp == NULL)
5713 return;
5714
5715 for (dir = strtok_r(sp, ":", &tmp);
5716 dir != NULL;
5717 dir = strtok_r(NULL, ":", &tmp))
5718 print_zpool_dir_scripts(dir);
5719
5720 free(sp);
5721 }
5722
5723 /*
5724 * Set the minimum pool/vdev name column width. The width must be at least 10,
5725 * but may be as large as the terminal width minus 42 so it still fits on one line.
5726 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
5727 */
5728 static int
5729 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5730 {
5731 iostat_cbdata_t *cb = data;
5732 int width, available_width;
5733
5734 /*
5735 * get_namewidth() returns the maximum width of any name in that column
5736 * for any pool/vdev/device line that will be output.
5737 */
5738 width = get_namewidth(zhp, cb->cb_namewidth,
5739 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
5740
5741 /*
5742 * The width we are calculating is the width of the header and also the
5743 * padding width for names that are less than maximum width. The stats
5744 * take up 42 characters, so the width available for names is:
5745 */
5746 available_width = get_columns() - 42;
5747
5748 /*
5749 * If the maximum width fits on a screen, then great! Make everything
5750 * line up by justifying all lines to the same width. If that max
5751 * width is larger than what's available, the name plus stats won't fit
5752 * on one line, and justifying to that width would cause every line to
5753 * wrap on the screen. We only want lines with long names to wrap.
5754 * Limit the padding to what won't wrap.
5755 */
5756 if (width > available_width)
5757 width = available_width;
5758
5759 /*
5760 * And regardless of whatever the screen width is (get_columns can
5761 * return 0 if the width is not known or less than 42 for a narrow
5762 * terminal) have the width be a minimum of 10.
5763 */
5764 if (width < 10)
5765 width = 10;
5766
5767 /* Save the calculated width */
5768 cb->cb_namewidth = width;
5769
5770 return (0);
5771 }
5772
5773 /*
5774 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
5775 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5776 * [interval [count]]
5777 *
5778 * -c CMD For each vdev, run command CMD
5779 * -g Display guid for individual vdev name.
5780 * -L Follow links when resolving vdev path name.
5781 * -P Display full path for vdev name.
5782 * -v Display statistics for individual vdevs
5783 * -h Display help
5784 * -p Display values in parsable (exact) format.
5785 * -H Scripted mode. Don't display headers, and separate properties
5786 * by a single tab.
5787 * -l Display average latency
5788 * -q Display queue depths
5789 * -w Display latency histograms
5790 * -r Display request size histogram
5791 * -T Display a timestamp in date(1) or Unix format
5792 * -n Only print headers once
5793 *
5794 * This command can be tricky because we want to be able to deal with pool
5795 * creation/destruction as well as vdev configuration changes. The bulk of this
5796 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
5797 * on pool_list_update() to detect the addition of new pools. Configuration
5798 * changes are all handled within libzfs.
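 *
 * Example invocations (illustrative; pool and vdev names are placeholders):
 *
 *     zpool iostat -v tank 5      # per-vdev stats for 'tank' every 5 seconds
 *     zpool iostat -l 1 10        # average latencies, 10 one-second samples
 *     zpool iostat -r tank        # request-size histograms for 'tank'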
5799 */
5800 int
5801 zpool_do_iostat(int argc, char **argv)
5802 {
5803 int c;
5804 int ret;
5805 int npools;
5806 float interval = 0;
5807 unsigned long count = 0;
5808 int winheight = 24;
5809 zpool_list_t *list;
5810 boolean_t verbose = B_FALSE;
5811 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5812 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5813 boolean_t omit_since_boot = B_FALSE;
5814 boolean_t guid = B_FALSE;
5815 boolean_t follow_links = B_FALSE;
5816 boolean_t full_name = B_FALSE;
5817 boolean_t headers_once = B_FALSE;
5818 iostat_cbdata_t cb = { 0 };
5819 char *cmd = NULL;
5820
5821 /* Used for printing error message */
5822 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5823 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5824
5825 uint64_t unsupported_flags;
5826
5827 /* check options */
5828 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5829 switch (c) {
5830 case 'c':
5831 if (cmd != NULL) {
5832 fprintf(stderr,
5833 gettext("Can't set -c flag twice\n"));
5834 exit(1);
5835 }
5836
5837 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5838 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5839 fprintf(stderr, gettext(
5840 "Can't run -c, disabled by "
5841 "ZPOOL_SCRIPTS_ENABLED.\n"));
5842 exit(1);
5843 }
5844
5845 if ((getuid() <= 0 || geteuid() <= 0) &&
5846 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5847 fprintf(stderr, gettext(
5848 "Can't run -c with root privileges "
5849 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5850 exit(1);
5851 }
5852 cmd = optarg;
5853 verbose = B_TRUE;
5854 break;
5855 case 'g':
5856 guid = B_TRUE;
5857 break;
5858 case 'L':
5859 follow_links = B_TRUE;
5860 break;
5861 case 'P':
5862 full_name = B_TRUE;
5863 break;
5864 case 'T':
5865 get_timestamp_arg(*optarg);
5866 break;
5867 case 'v':
5868 verbose = B_TRUE;
5869 break;
5870 case 'p':
5871 parsable = B_TRUE;
5872 break;
5873 case 'l':
5874 latency = B_TRUE;
5875 break;
5876 case 'q':
5877 queues = B_TRUE;
5878 break;
5879 case 'H':
5880 scripted = B_TRUE;
5881 break;
5882 case 'w':
5883 l_histo = B_TRUE;
5884 break;
5885 case 'r':
5886 rq_histo = B_TRUE;
5887 break;
5888 case 'y':
5889 omit_since_boot = B_TRUE;
5890 break;
5891 case 'n':
5892 headers_once = B_TRUE;
5893 break;
5894 case 'h':
5895 usage(B_FALSE);
5896 break;
5897 case '?':
5898 if (optopt == 'c') {
5899 print_zpool_script_list("iostat");
5900 exit(0);
5901 } else {
5902 fprintf(stderr,
5903 gettext("invalid option '%c'\n"), optopt);
5904 }
5905 usage(B_FALSE);
5906 }
5907 }
5908
5909 argc -= optind;
5910 argv += optind;
5911
5912 cb.cb_literal = parsable;
5913 cb.cb_scripted = scripted;
5914
5915 if (guid)
5916 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5917 if (follow_links)
5918 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5919 if (full_name)
5920 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5921 cb.cb_iteration = 0;
5922 cb.cb_namewidth = 0;
5923 cb.cb_verbose = verbose;
5924
5925 /* Get our interval and count values (if any) */
5926 if (guid) {
5927 get_interval_count_filter_guids(&argc, argv, &interval,
5928 &count, &cb);
5929 } else {
5930 get_interval_count(&argc, argv, &interval, &count);
5931 }
5932
5933 if (argc == 0) {
5934 /* No args, so just print the defaults. */
5935 } else if (are_all_pools(argc, argv)) {
5936 /* All the args are pool names */
5937 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5938 /* All the args are vdevs */
5939 cb.cb_vdevs.cb_names = argv;
5940 cb.cb_vdevs.cb_names_count = argc;
5941 argc = 0; /* No pools to process */
5942 } else if (are_all_pools(1, argv)) {
5943 /* The first arg is a pool name */
5944 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5945 &cb.cb_vdevs)) {
5946 /* ...and the rest are vdev names */
5947 cb.cb_vdevs.cb_names = argv + 1;
5948 cb.cb_vdevs.cb_names_count = argc - 1;
5949 argc = 1; /* One pool to process */
5950 } else {
5951 fprintf(stderr, gettext("Expected either a list of "));
5952 fprintf(stderr, gettext("pools, or a list of vdevs in"));
5953 fprintf(stderr, " \"%s\", ", argv[0]);
5954 fprintf(stderr, gettext("but got:\n"));
5955 error_list_unresolved_vdevs(argc - 1, argv + 1,
5956 argv[0], &cb.cb_vdevs);
5957 fprintf(stderr, "\n");
5958 usage(B_FALSE);
5959 return (1);
5960 }
5961 } else {
5962 /*
5963 * The args don't make sense. The first arg isn't a pool name,
5964 * nor are all the args vdevs.
5965 */
5966 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5967 fprintf(stderr, "\n");
5968 return (1);
5969 }
5970
5971 if (cb.cb_vdevs.cb_names_count != 0) {
5972 /*
5973 * If user specified vdevs, it implies verbose.
5974 */
5975 cb.cb_verbose = B_TRUE;
5976 }
5977
5978 /*
5979 * Construct the list of all interesting pools.
5980 */
5981 ret = 0;
5982 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5983 &ret)) == NULL)
5984 return (1);
5985
5986 if (pool_list_count(list) == 0 && argc != 0) {
5987 pool_list_free(list);
5988 return (1);
5989 }
5990
5991 if (pool_list_count(list) == 0 && interval == 0) {
5992 pool_list_free(list);
5993 (void) fprintf(stderr, gettext("no pools available\n"));
5994 return (1);
5995 }
5996
5997 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5998 pool_list_free(list);
5999 (void) fprintf(stderr,
6000 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
6001 usage(B_FALSE);
6002 return (1);
6003 }
6004
6005 if (l_histo && rq_histo) {
6006 pool_list_free(list);
6007 (void) fprintf(stderr,
6008 gettext("Only one of [-r|-w] can be passed at a time\n"));
6009 usage(B_FALSE);
6010 return (1);
6011 }
6012
6013 /*
6014 * Enter the main iostat loop.
6015 */
6016 cb.cb_list = list;
6017
6018 if (l_histo) {
6019 /*
6020 * Histogram tables look out of place when you try to display
6021 * them with the other stats, so make a rule that you can only
6022 * print histograms by themselves.
6023 */
6024 cb.cb_flags = IOS_L_HISTO_M;
6025 } else if (rq_histo) {
6026 cb.cb_flags = IOS_RQ_HISTO_M;
6027 } else {
6028 cb.cb_flags = IOS_DEFAULT_M;
6029 if (latency)
6030 cb.cb_flags |= IOS_LATENCY_M;
6031 if (queues)
6032 cb.cb_flags |= IOS_QUEUES_M;
6033 }
6034
6035 /*
6036 * See if the module supports all the stats we want to display.
6037 */
6038 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
6039 if (unsupported_flags) {
6040 uint64_t f;
6041 int idx;
6042 fprintf(stderr,
6043 gettext("The loaded zfs module doesn't support:"));
6044
6045 /* for each bit set in unsupported_flags */
6046 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
6047 idx = lowbit64(f) - 1;
6048 fprintf(stderr, " -%c", flag_to_arg[idx]);
6049 }
6050
6051 fprintf(stderr, ". Try running a newer module.\n");
6052 pool_list_free(list);
6053
6054 return (1);
6055 }
6056
6057 for (;;) {
6058 if ((npools = pool_list_count(list)) == 0)
6059 (void) fprintf(stderr, gettext("no pools available\n"));
6060 else {
6061 /*
6062 * If this is the first iteration and -y was supplied
6063 * we skip any printing.
6064 */
6065 boolean_t skip = (omit_since_boot &&
6066 cb.cb_iteration == 0);
6067
6068 /*
6069 * Refresh all statistics. This is done as an
6070 * explicit step before calculating the maximum name
6071 * width, so that any configuration changes are
6072 * properly accounted for.
6073 */
6074 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
6075 &cb);
6076
6077 /*
6078 * Iterate over all pools to determine the maximum width
6079 * for the pool / device name column across all pools.
6080 */
6081 cb.cb_namewidth = 0;
6082 (void) pool_list_iter(list, B_FALSE,
6083 get_namewidth_iostat, &cb);
6084
6085 if (timestamp_fmt != NODATE)
6086 print_timestamp(timestamp_fmt);
6087
6088 if (cmd != NULL && cb.cb_verbose &&
6089 !(cb.cb_flags & IOS_ANYHISTO_M)) {
6090 cb.vcdl = all_pools_for_each_vdev_run(argc,
6091 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
6092 cb.cb_vdevs.cb_names_count,
6093 cb.cb_vdevs.cb_name_flags);
6094 } else {
6095 cb.vcdl = NULL;
6096 }
6097
6098
6099 /*
6100 * Check terminal size so we can print headers
6101 * even when terminal window has its height
6102 * changed.
6103 */
6104 winheight = terminal_height();
6105 /*
6106 * Are we connected to TTY? If not, headers_once
6107 * should be true, to avoid breaking scripts.
6108 */
6109 if (winheight < 0)
6110 headers_once = B_TRUE;
6111
6112 /*
6113 * Print the header on the first non-skipped iteration, when exactly
6114 * one of skip and verbose is set, or periodically as output scrolls.
6115 *
6116 * The histogram code explicitly prints its header on
6117 * every vdev, so skip this for histograms.
6118 */
6119 if (((++cb.cb_iteration == 1 && !skip) ||
6120 (skip != verbose) ||
6121 (!headers_once &&
6122 (cb.cb_iteration % winheight) == 0)) &&
6123 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
6124 !cb.cb_scripted)
6125 print_iostat_header(&cb);
6126
6127 if (skip) {
6128 (void) fflush(stdout);
6129 (void) fsleep(interval);
6130 continue;
6131 }
6132
6133 pool_list_iter(list, B_FALSE, print_iostat, &cb);
6134
6135 /*
6136 * If there's more than one pool, and we're not in
6137 * verbose mode (which prints a separator for us),
6138 * then print a separator.
6139 *
6140 * In addition, if we're printing specific vdevs then
6141 * we also want an ending separator.
6142 */
6143 if (((npools > 1 && !verbose &&
6144 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
6145 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
6146 cb.cb_vdevs.cb_names_count)) &&
6147 !cb.cb_scripted) {
6148 print_iostat_separator(&cb);
6149 if (cb.vcdl != NULL)
6150 print_cmd_columns(cb.vcdl, 1);
6151 printf("\n");
6152 }
6153
6154 if (cb.vcdl != NULL)
6155 free_vdev_cmd_data_list(cb.vcdl);
6156
6157 }
6158
6159 if (interval == 0)
6160 break;
6161
6162 if (count != 0 && --count == 0)
6163 break;
6164
6165 (void) fflush(stdout);
6166 (void) fsleep(interval);
6167 }
6168
6169 pool_list_free(list);
6170
6171 return (ret);
6172 }
6173
6174 typedef struct list_cbdata {
6175 boolean_t cb_verbose;
6176 int cb_name_flags;
6177 int cb_namewidth;
6178 boolean_t cb_scripted;
6179 zprop_list_t *cb_proplist;
6180 boolean_t cb_literal;
6181 } list_cbdata_t;
6182
6183
6184 /*
6185 * Given a list of columns to display, output appropriate headers for each one.
6186 */
6187 static void
6188 print_header(list_cbdata_t *cb)
6189 {
6190 zprop_list_t *pl = cb->cb_proplist;
6191 char headerbuf[ZPOOL_MAXPROPLEN];
6192 const char *header;
6193 boolean_t first = B_TRUE;
6194 boolean_t right_justify;
6195 size_t width = 0;
6196
6197 for (; pl != NULL; pl = pl->pl_next) {
6198 width = pl->pl_width;
6199 if (first && cb->cb_verbose) {
6200 /*
6201 * Reset the width to accommodate the verbose listing
6202 * of devices.
6203 */
6204 width = cb->cb_namewidth;
6205 }
6206
6207 if (!first)
6208 (void) fputs(" ", stdout);
6209 else
6210 first = B_FALSE;
6211
6212 right_justify = B_FALSE;
6213 if (pl->pl_prop != ZPROP_USERPROP) {
6214 header = zpool_prop_column_name(pl->pl_prop);
6215 right_justify = zpool_prop_align_right(pl->pl_prop);
6216 } else {
6217 int i;
6218
6219 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
6220 headerbuf[i] = toupper(pl->pl_user_prop[i]);
6221 headerbuf[i] = '\0';
6222 header = headerbuf;
6223 }
6224
6225 if (pl->pl_next == NULL && !right_justify)
6226 (void) fputs(header, stdout);
6227 else if (right_justify)
6228 (void) printf("%*s", (int)width, header);
6229 else
6230 (void) printf("%-*s", (int)width, header);
6231 }
6232
6233 (void) fputc('\n', stdout);
6234 }
6235
6236 /*
6237 * Given a pool and a list of properties, print out all the properties according
6238 * to the described layout. Used by zpool_do_list().
6239 */
6240 static void
6241 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
6242 {
6243 zprop_list_t *pl = cb->cb_proplist;
6244 boolean_t first = B_TRUE;
6245 char property[ZPOOL_MAXPROPLEN];
6246 const char *propstr;
6247 boolean_t right_justify;
6248 size_t width;
6249
6250 for (; pl != NULL; pl = pl->pl_next) {
6251
6252 width = pl->pl_width;
6253 if (first && cb->cb_verbose) {
6254 /*
6255 * Reset the width to accommodate the verbose listing
6256 * of devices.
6257 */
6258 width = cb->cb_namewidth;
6259 }
6260
6261 if (!first) {
6262 if (cb->cb_scripted)
6263 (void) fputc('\t', stdout);
6264 else
6265 (void) fputs(" ", stdout);
6266 } else {
6267 first = B_FALSE;
6268 }
6269
6270 right_justify = B_FALSE;
6271 if (pl->pl_prop != ZPROP_USERPROP) {
6272 if (zpool_get_prop(zhp, pl->pl_prop, property,
6273 sizeof (property), NULL, cb->cb_literal) != 0)
6274 propstr = "-";
6275 else
6276 propstr = property;
6277
6278 right_justify = zpool_prop_align_right(pl->pl_prop);
6279 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6280 zpool_prop_unsupported(pl->pl_user_prop)) &&
6281 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6282 sizeof (property)) == 0) {
6283 propstr = property;
6284 } else if (zfs_prop_user(pl->pl_user_prop) &&
6285 zpool_get_userprop(zhp, pl->pl_user_prop, property,
6286 sizeof (property), NULL) == 0) {
6287 propstr = property;
6288 } else {
6289 propstr = "-";
6290 }
6291
6292 /*
6293 * If this is being called in scripted mode, or if this is the
6294 * last column and it is left-justified, don't include a width
6295 * format specifier.
6296 */
6297 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6298 (void) fputs(propstr, stdout);
6299 else if (right_justify)
6300 (void) printf("%*s", (int)width, propstr);
6301 else
6302 (void) printf("%-*s", (int)width, propstr);
6303 }
6304
6305 (void) fputc('\n', stdout);
6306 }
6307
6308 static void
6309 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6310 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6311 {
6312 char propval[64];
6313 boolean_t fixed;
6314 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6315
6316 switch (prop) {
6317 case ZPOOL_PROP_SIZE:
6318 case ZPOOL_PROP_EXPANDSZ:
6319 case ZPOOL_PROP_CHECKPOINT:
6320 case ZPOOL_PROP_DEDUPRATIO:
6321 if (value == 0)
6322 (void) strlcpy(propval, "-", sizeof (propval));
6323 else
6324 zfs_nicenum_format(value, propval, sizeof (propval),
6325 format);
6326 break;
6327 case ZPOOL_PROP_FRAGMENTATION:
6328 if (value == ZFS_FRAG_INVALID) {
6329 (void) strlcpy(propval, "-", sizeof (propval));
6330 } else if (format == ZFS_NICENUM_RAW) {
6331 (void) snprintf(propval, sizeof (propval), "%llu",
6332 (unsigned long long)value);
6333 } else {
6334 (void) snprintf(propval, sizeof (propval), "%llu%%",
6335 (unsigned long long)value);
6336 }
6337 break;
6338 case ZPOOL_PROP_CAPACITY:
6339 /* capacity value is in parts-per-10,000 (aka permyriad) */
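		/*
		 * Illustrative example: a raw value of 4250 (42.50%) is
		 * printed as "42.5%", or as "42" in parsable (-p) mode.
		 */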
6340 if (format == ZFS_NICENUM_RAW)
6341 (void) snprintf(propval, sizeof (propval), "%llu",
6342 (unsigned long long)value / 100);
6343 else
6344 (void) snprintf(propval, sizeof (propval),
6345 value < 1000 ? "%1.2f%%" : value < 10000 ?
6346 "%2.1f%%" : "%3.0f%%", value / 100.0);
6347 break;
6348 case ZPOOL_PROP_HEALTH:
6349 width = 8;
6350 (void) strlcpy(propval, str, sizeof (propval));
6351 break;
6352 default:
6353 zfs_nicenum_format(value, propval, sizeof (propval), format);
6354 }
6355
6356 if (!valid)
6357 (void) strlcpy(propval, "-", sizeof (propval));
6358
6359 if (scripted)
6360 (void) printf("\t%s", propval);
6361 else
6362 (void) printf(" %*s", (int)width, propval);
6363 }
6364
6365 /*
6366 * Print the default statistics line for a single vdev; this output is
6367 * not compatible with the '-o' <proplist> option.
6368 */
6369 static void
6370 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6371 list_cbdata_t *cb, int depth, boolean_t isspare)
6372 {
6373 nvlist_t **child;
6374 vdev_stat_t *vs;
6375 uint_t c, children;
6376 char *vname;
6377 boolean_t scripted = cb->cb_scripted;
6378 uint64_t islog = B_FALSE;
6379 const char *dashes = "%-*s - - - - "
6380 "- - - - -\n";
6381
6382 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6383 (uint64_t **)&vs, &c) == 0);
6384
6385 if (name != NULL) {
6386 boolean_t toplevel = (vs->vs_space != 0);
6387 uint64_t cap;
6388 enum zfs_nicenum_format format;
6389 const char *state;
6390
6391 if (cb->cb_literal)
6392 format = ZFS_NICENUM_RAW;
6393 else
6394 format = ZFS_NICENUM_1024;
6395
6396 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6397 return;
6398
6399 if (scripted)
6400 (void) printf("\t%s", name);
6401 else if (strlen(name) + depth > cb->cb_namewidth)
6402 (void) printf("%*s%s", depth, "", name);
6403 else
6404 (void) printf("%*s%s%*s", depth, "", name,
6405 (int)(cb->cb_namewidth - strlen(name) - depth), "");
6406
6407 /*
6408 * Print the properties for the individual vdevs. Some
6409 * properties are only applicable to toplevel vdevs. The
6410 * 'toplevel' boolean value is passed to print_one_column()
6411 * to indicate that the value is valid.
6412 */
6413 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
6414 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
6415 scripted, B_TRUE, format);
6416 else
6417 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
6418 scripted, toplevel, format);
6419 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6420 scripted, toplevel, format);
6421 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6422 NULL, scripted, toplevel, format);
6423 print_one_column(ZPOOL_PROP_CHECKPOINT,
6424 vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6425 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6426 scripted, B_TRUE, format);
6427 print_one_column(ZPOOL_PROP_FRAGMENTATION,
6428 vs->vs_fragmentation, NULL, scripted,
6429 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6430 format);
6431 cap = (vs->vs_space == 0) ? 0 :
6432 (vs->vs_alloc * 10000 / vs->vs_space);
6433 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6434 scripted, toplevel, format);
6435 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6436 scripted, toplevel, format);
6437 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6438 if (isspare) {
6439 if (vs->vs_aux == VDEV_AUX_SPARED)
6440 state = "INUSE";
6441 else if (vs->vs_state == VDEV_STATE_HEALTHY)
6442 state = "AVAIL";
6443 }
6444 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6445 B_TRUE, format);
6446 (void) fputc('\n', stdout);
6447 }
6448
6449 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6450 &child, &children) != 0)
6451 return;
6452
6453 /* list the normal vdevs first */
6454 for (c = 0; c < children; c++) {
6455 uint64_t ishole = B_FALSE;
6456
6457 if (nvlist_lookup_uint64(child[c],
6458 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6459 continue;
6460
6461 if (nvlist_lookup_uint64(child[c],
6462 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6463 continue;
6464
6465 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6466 continue;
6467
6468 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6469 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6470 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6471 free(vname);
6472 }
6473
6474 /* list the classes: 'logs', 'dedup', and 'special' */
6475 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
6476 boolean_t printed = B_FALSE;
6477
6478 for (c = 0; c < children; c++) {
6479 const char *bias = NULL;
6480 const char *type = NULL;
6481
6482 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6483 &islog) == 0 && islog) {
6484 bias = VDEV_ALLOC_CLASS_LOGS;
6485 } else {
6486 (void) nvlist_lookup_string(child[c],
6487 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6488 (void) nvlist_lookup_string(child[c],
6489 ZPOOL_CONFIG_TYPE, &type);
6490 }
6491 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6492 continue;
6493 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6494 continue;
6495
6496 if (!printed) {
6497 /* LINTED E_SEC_PRINTF_VAR_FMT */
6498 (void) printf(dashes, cb->cb_namewidth,
6499 class_name[n]);
6500 printed = B_TRUE;
6501 }
6502 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6503 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6504 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6505 B_FALSE);
6506 free(vname);
6507 }
6508 }
6509
6510 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6511 &child, &children) == 0 && children > 0) {
6512 /* LINTED E_SEC_PRINTF_VAR_FMT */
6513 (void) printf(dashes, cb->cb_namewidth, "cache");
6514 for (c = 0; c < children; c++) {
6515 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6516 cb->cb_name_flags);
6517 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6518 B_FALSE);
6519 free(vname);
6520 }
6521 }
6522
6523 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6524 &children) == 0 && children > 0) {
6525 /* LINTED E_SEC_PRINTF_VAR_FMT */
6526 (void) printf(dashes, cb->cb_namewidth, "spare");
6527 for (c = 0; c < children; c++) {
6528 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6529 cb->cb_name_flags);
6530 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6531 B_TRUE);
6532 free(vname);
6533 }
6534 }
6535 }
6536
6537 /*
6538 * Generic callback function to list a pool.
6539 */
6540 static int
6541 list_callback(zpool_handle_t *zhp, void *data)
6542 {
6543 list_cbdata_t *cbp = data;
6544
6545 print_pool(zhp, cbp);
6546
6547 if (cbp->cb_verbose) {
6548 nvlist_t *config, *nvroot;
6549
6550 config = zpool_get_config(zhp, NULL);
6551 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6552 &nvroot) == 0);
6553 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6554 }
6555
6556 return (0);
6557 }
6558
6559 /*
6560 * Set the minimum pool/vdev name column width. The width must be at least 9,
6561 * but may be as large as needed.
6562 */
6563 static int
6564 get_namewidth_list(zpool_handle_t *zhp, void *data)
6565 {
6566 list_cbdata_t *cb = data;
6567 int width;
6568
6569 width = get_namewidth(zhp, cb->cb_namewidth,
6570 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6571
6572 if (width < 9)
6573 width = 9;
6574
6575 cb->cb_namewidth = width;
6576
6577 return (0);
6578 }
6579
6580 /*
6581 * zpool list [-gHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6582 *
6583 * -g Display guid for individual vdev name.
6584 * -H Scripted mode. Don't display headers, and separate properties
6585 * by a single tab.
6586 * -L Follow links when resolving vdev path name.
6587 * -o List of properties to display. Defaults to
6588 * "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6589 * "capacity,dedupratio,health,altroot"
6590 * -p Display values in parsable (exact) format.
6591 * -P Display full path for vdev name.
6592 * -T Display a timestamp in date(1) or Unix format
 * -v Display space statistics for the individual vdevs within each pool
6593 *
6594 * List all pools in the system, whether or not they're healthy. Output space
6595 * statistics for each one, as well as a health status summary.
6596 */
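/*
 * Illustrative invocations (an editor's sketch; "tank" is a placeholder):
 *
 *	zpool list				# one summary line per pool
 *	zpool list -v tank			# include per-vdev space stats
 *	zpool list -Hp -o name,size,capacity	# exact values, tab separated,
 *						# suitable for scripts
 */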
6597 int
6598 zpool_do_list(int argc, char **argv)
6599 {
6600 int c;
6601 int ret = 0;
6602 list_cbdata_t cb = { 0 };
6603 static char default_props[] =
6604 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6605 "capacity,dedupratio,health,altroot";
6606 char *props = default_props;
6607 float interval = 0;
6608 unsigned long count = 0;
6609 zpool_list_t *list;
6610 boolean_t first = B_TRUE;
6611 current_prop_type = ZFS_TYPE_POOL;
6612
6613 /* check options */
6614 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6615 switch (c) {
6616 case 'g':
6617 cb.cb_name_flags |= VDEV_NAME_GUID;
6618 break;
6619 case 'H':
6620 cb.cb_scripted = B_TRUE;
6621 break;
6622 case 'L':
6623 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6624 break;
6625 case 'o':
6626 props = optarg;
6627 break;
6628 case 'P':
6629 cb.cb_name_flags |= VDEV_NAME_PATH;
6630 break;
6631 case 'p':
6632 cb.cb_literal = B_TRUE;
6633 break;
6634 case 'T':
6635 get_timestamp_arg(*optarg);
6636 break;
6637 case 'v':
6638 cb.cb_verbose = B_TRUE;
6639 cb.cb_namewidth = 8; /* 8 until precalc is avail */
6640 break;
6641 case ':':
6642 (void) fprintf(stderr, gettext("missing argument for "
6643 "'%c' option\n"), optopt);
6644 usage(B_FALSE);
6645 break;
6646 case '?':
6647 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6648 optopt);
6649 usage(B_FALSE);
6650 }
6651 }
6652
6653 argc -= optind;
6654 argv += optind;
6655
6656 get_interval_count(&argc, argv, &interval, &count);
6657
6658 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6659 usage(B_FALSE);
6660
6661 for (;;) {
6662 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6663 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6664 return (1);
6665
6666 if (pool_list_count(list) == 0)
6667 break;
6668
6669 cb.cb_namewidth = 0;
6670 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6671
6672 if (timestamp_fmt != NODATE)
6673 print_timestamp(timestamp_fmt);
6674
6675 if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6676 print_header(&cb);
6677 first = B_FALSE;
6678 }
6679 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6680
6681 if (interval == 0)
6682 break;
6683
6684 if (count != 0 && --count == 0)
6685 break;
6686
6687 pool_list_free(list);
6688
6689 (void) fflush(stdout);
6690 (void) fsleep(interval);
6691 }
6692
6693 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6694 (void) printf(gettext("no pools available\n"));
6695 ret = 0;
6696 }
6697
6698 pool_list_free(list);
6699 zprop_free_list(cb.cb_proplist);
6700 return (ret);
6701 }
6702
6703 static int
6704 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6705 {
6706 boolean_t force = B_FALSE;
6707 boolean_t rebuild = B_FALSE;
6708 boolean_t wait = B_FALSE;
6709 int c;
6710 nvlist_t *nvroot;
6711 char *poolname, *old_disk, *new_disk;
6712 zpool_handle_t *zhp;
6713 nvlist_t *props = NULL;
6714 char *propval;
6715 int ret;
6716
6717 /* check options */
6718 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6719 switch (c) {
6720 case 'f':
6721 force = B_TRUE;
6722 break;
6723 case 'o':
6724 if ((propval = strchr(optarg, '=')) == NULL) {
6725 (void) fprintf(stderr, gettext("missing "
6726 "'=' for -o option\n"));
6727 usage(B_FALSE);
6728 }
6729 *propval = '\0';
6730 propval++;
6731
6732 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6733 (add_prop_list(optarg, propval, &props, B_TRUE)))
6734 usage(B_FALSE);
6735 break;
6736 case 's':
6737 rebuild = B_TRUE;
6738 break;
6739 case 'w':
6740 wait = B_TRUE;
6741 break;
6742 case '?':
6743 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6744 optopt);
6745 usage(B_FALSE);
6746 }
6747 }
6748
6749 argc -= optind;
6750 argv += optind;
6751
6752 /* get pool name and check number of arguments */
6753 if (argc < 1) {
6754 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6755 usage(B_FALSE);
6756 }
6757
6758 poolname = argv[0];
6759
6760 if (argc < 2) {
6761 (void) fprintf(stderr,
6762 gettext("missing <device> specification\n"));
6763 usage(B_FALSE);
6764 }
6765
6766 old_disk = argv[1];
6767
6768 if (argc < 3) {
6769 if (!replacing) {
6770 (void) fprintf(stderr,
6771 gettext("missing <new_device> specification\n"));
6772 usage(B_FALSE);
6773 }
6774 new_disk = old_disk;
6775 argc -= 1;
6776 argv += 1;
6777 } else {
6778 new_disk = argv[2];
6779 argc -= 2;
6780 argv += 2;
6781 }
6782
6783 if (argc > 1) {
6784 (void) fprintf(stderr, gettext("too many arguments\n"));
6785 usage(B_FALSE);
6786 }
6787
6788 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6789 nvlist_free(props);
6790 return (1);
6791 }
6792
6793 if (zpool_get_config(zhp, NULL) == NULL) {
6794 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6795 poolname);
6796 zpool_close(zhp);
6797 nvlist_free(props);
6798 return (1);
6799 }
6800
6801 /* unless manually specified use "ashift" pool property (if set) */
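	/*
	 * Illustrative example: if the pool was created with "ashift=12",
	 * the property source is no longer the default, so the attached or
	 * replacement device picks up ashift=12 here unless "-o ashift=<n>"
	 * was passed explicitly above.
	 */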
6802 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6803 int intval;
6804 zprop_source_t src;
6805 char strval[ZPOOL_MAXPROPLEN];
6806
6807 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6808 if (src != ZPROP_SRC_DEFAULT) {
6809 (void) sprintf(strval, "%" PRId32, intval);
6810 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6811 &props, B_TRUE) == 0);
6812 }
6813 }
6814
6815 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6816 argc, argv);
6817 if (nvroot == NULL) {
6818 zpool_close(zhp);
6819 nvlist_free(props);
6820 return (1);
6821 }
6822
6823 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6824 rebuild);
6825
6826 if (ret == 0 && wait) {
6827 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
6828 char raidz_prefix[] = "raidz";
6829 if (replacing) {
6830 activity = ZPOOL_WAIT_REPLACE;
6831 } else if (strncmp(old_disk,
6832 raidz_prefix, strlen(raidz_prefix)) == 0) {
6833 activity = ZPOOL_WAIT_RAIDZ_EXPAND;
6834 }
6835 ret = zpool_wait(zhp, activity);
6836 }
6837
6838 nvlist_free(props);
6839 nvlist_free(nvroot);
6840 zpool_close(zhp);
6841
6842 return (ret);
6843 }
6844
6845 /*
6846 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6847 *
6848 * -f Force attach, even if <new_device> appears to be in use.
6849 * -s Use sequential instead of healing reconstruction for resilver.
6850 * -o Set property=value.
6851 * -w Wait for replacing to complete before returning
6852 *
6853 * Replace <device> with <new_device>.
6854 */
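/*
 * Illustrative invocation (an editor's sketch; names are placeholders):
 *
 *	zpool replace -w tank sda sdb	# replace "sda" with "sdb" in pool
 *					# "tank" and wait for the resilver
 */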
6855 int
6856 zpool_do_replace(int argc, char **argv)
6857 {
6858 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6859 }
6860
6861 /*
6862 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
6863 *
6864 * -f Force attach, even if <new_device> appears to be in use.
6865 * -s Use sequential instead of healing reconstruction for resilver.
6866 * -o Set property=value.
6867 * -w Wait for resilvering (mirror) or expansion (raidz) to complete
6868 * before returning.
6869 *
6870 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
6871 * mirror or raidz. If <device> is not part of a mirror, then <device> will
6872 * be transformed into a mirror of <device> and <new_device>. When a mirror
6873 * is involved, <new_device> will begin life with a DTL of [0, now], and will
6874 * immediately begin to resilver itself. For the raidz case, an expansion will
6875 * commence and reflow the raidz data across all the disks including the
6876 * <new_device>.
6877 */
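/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool attach -w tank sda sdb	# mirror "sdb" onto the vdev holding
 *					# "sda" and wait for the resilver
 *	zpool attach tank raidz1-0 sdc	# expand the raidz vdev "raidz1-0"
 *					# with the new disk "sdc"
 */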
6878 int
6879 zpool_do_attach(int argc, char **argv)
6880 {
6881 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6882 }
6883
6884 /*
6885 * zpool detach [-f] <pool> <device>
6886 *
6887 * -f Force detach of <device>, even if DTLs argue against it
6888 * (not supported yet)
6889 *
6890 * Detach a device from a mirror. The operation will be refused if <device>
6891 * is the last device in the mirror, or if the DTLs indicate that this device
6892 * has the only valid copy of some data.
6893 */
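/*
 * Illustrative invocation (an editor's sketch; names are placeholders):
 *
 *	zpool detach tank sdb		# remove "sdb" from its mirror in "tank"
 */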
6894 int
6895 zpool_do_detach(int argc, char **argv)
6896 {
6897 int c;
6898 char *poolname, *path;
6899 zpool_handle_t *zhp;
6900 int ret;
6901
6902 /* check options */
6903 while ((c = getopt(argc, argv, "")) != -1) {
6904 switch (c) {
6905 case '?':
6906 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6907 optopt);
6908 usage(B_FALSE);
6909 }
6910 }
6911
6912 argc -= optind;
6913 argv += optind;
6914
6915 /* get pool name and check number of arguments */
6916 if (argc < 1) {
6917 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6918 usage(B_FALSE);
6919 }
6920
6921 if (argc < 2) {
6922 (void) fprintf(stderr,
6923 gettext("missing <device> specification\n"));
6924 usage(B_FALSE);
6925 }
6926
6927 poolname = argv[0];
6928 path = argv[1];
6929
6930 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6931 return (1);
6932
6933 ret = zpool_vdev_detach(zhp, path);
6934
6935 zpool_close(zhp);
6936
6937 return (ret);
6938 }
6939
6940 /*
6941 * zpool split [-gLnP] [-o prop=val] ...
6942 * [-o mntopt] ...
6943 * [-R altroot] <pool> <newpool> [<device> ...]
6944 *
6945 * -g Display guid for individual vdev name.
6946 * -L Follow links when resolving vdev path name.
6947 * -n Do not split the pool, but display the resulting layout if
6948 * it were to be split.
6949 * -o Set property=value, or set mount options.
6950 * -P Display full path for vdev name.
6951 * -R Mount the split-off pool under an alternate root.
6952 * -l Load encryption keys while importing.
6953 *
6954 * Splits the named pool and gives it the new pool name. Devices to be split
6955 * off may be listed, provided that no more than one device is specified
6956 * per top-level vdev mirror. The newly split pool is left in an exported
6957 * state unless -R is specified.
6958 *
6959 * Restrictions: the top level of the pool must be made up only of
6960 * mirrors; all devices in the pool must be healthy; no device may be
6961 * undergoing a resilvering operation.
6962 */
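/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool split -n tank newtank	# show the layout the split would
 *					# produce without performing it
 *	zpool split -R /mnt tank newtank sdb
 *					# split "sdb" off into "newtank" and
 *					# import it under the altroot /mnt
 */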
6963 int
6964 zpool_do_split(int argc, char **argv)
6965 {
6966 char *srcpool, *newpool, *propval;
6967 char *mntopts = NULL;
6968 splitflags_t flags;
6969 int c, ret = 0;
6970 int ms_status = 0;
6971 boolean_t loadkeys = B_FALSE;
6972 zpool_handle_t *zhp;
6973 nvlist_t *config, *props = NULL;
6974
6975 flags.dryrun = B_FALSE;
6976 flags.import = B_FALSE;
6977 flags.name_flags = 0;
6978
6979 /* check options */
6980 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6981 switch (c) {
6982 case 'g':
6983 flags.name_flags |= VDEV_NAME_GUID;
6984 break;
6985 case 'L':
6986 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6987 break;
6988 case 'R':
6989 flags.import = B_TRUE;
6990 if (add_prop_list(
6991 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6992 &props, B_TRUE) != 0) {
6993 nvlist_free(props);
6994 usage(B_FALSE);
6995 }
6996 break;
6997 case 'l':
6998 loadkeys = B_TRUE;
6999 break;
7000 case 'n':
7001 flags.dryrun = B_TRUE;
7002 break;
7003 case 'o':
7004 if ((propval = strchr(optarg, '=')) != NULL) {
7005 *propval = '\0';
7006 propval++;
7007 if (add_prop_list(optarg, propval,
7008 &props, B_TRUE) != 0) {
7009 nvlist_free(props);
7010 usage(B_FALSE);
7011 }
7012 } else {
7013 mntopts = optarg;
7014 }
7015 break;
7016 case 'P':
7017 flags.name_flags |= VDEV_NAME_PATH;
7018 break;
7019 case ':':
7020 (void) fprintf(stderr, gettext("missing argument for "
7021 "'%c' option\n"), optopt);
7022 usage(B_FALSE);
7023 break;
7024 case '?':
7025 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7026 optopt);
7027 usage(B_FALSE);
7028 break;
7029 }
7030 }
7031
7032 if (!flags.import && mntopts != NULL) {
7033 (void) fprintf(stderr, gettext("setting mntopts is only "
7034 "valid when importing the pool\n"));
7035 usage(B_FALSE);
7036 }
7037
7038 if (!flags.import && loadkeys) {
7039 (void) fprintf(stderr, gettext("loading keys is only "
7040 "valid when importing the pool\n"));
7041 usage(B_FALSE);
7042 }
7043
7044 argc -= optind;
7045 argv += optind;
7046
7047 if (argc < 1) {
7048 (void) fprintf(stderr, gettext("Missing pool name\n"));
7049 usage(B_FALSE);
7050 }
7051 if (argc < 2) {
7052 (void) fprintf(stderr, gettext("Missing new pool name\n"));
7053 usage(B_FALSE);
7054 }
7055
7056 srcpool = argv[0];
7057 newpool = argv[1];
7058
7059 argc -= 2;
7060 argv += 2;
7061
7062 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
7063 nvlist_free(props);
7064 return (1);
7065 }
7066
7067 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
7068 if (config == NULL) {
7069 ret = 1;
7070 } else {
7071 if (flags.dryrun) {
7072 (void) printf(gettext("would create '%s' with the "
7073 "following layout:\n\n"), newpool);
7074 print_vdev_tree(NULL, newpool, config, 0, "",
7075 flags.name_flags);
7076 print_vdev_tree(NULL, "dedup", config, 0,
7077 VDEV_ALLOC_BIAS_DEDUP, 0);
7078 print_vdev_tree(NULL, "special", config, 0,
7079 VDEV_ALLOC_BIAS_SPECIAL, 0);
7080 }
7081 }
7082
7083 zpool_close(zhp);
7084
7085 if (ret != 0 || flags.dryrun || !flags.import) {
7086 nvlist_free(config);
7087 nvlist_free(props);
7088 return (ret);
7089 }
7090
7091 /*
7092 * The split was successful. Now we need to open the new
7093 * pool and import it.
7094 */
7095 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
7096 nvlist_free(config);
7097 nvlist_free(props);
7098 return (1);
7099 }
7100
7101 if (loadkeys) {
7102 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
7103 if (ret != 0)
7104 ret = 1;
7105 }
7106
7107 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
7108 ms_status = zpool_enable_datasets(zhp, mntopts, 0);
7109 if (ms_status == EZFS_SHAREFAILED) {
7110 (void) fprintf(stderr, gettext("Split was successful, "
7111 "datasets are mounted but sharing of some datasets "
7112 "has failed\n"));
7113 } else if (ms_status == EZFS_MOUNTFAILED) {
7114 (void) fprintf(stderr, gettext("Split was successful"
7115 ", but some datasets could not be mounted\n"));
7116 (void) fprintf(stderr, gettext("Try doing '%s' with a "
7117 "different altroot\n"), "zpool import");
7118 }
7119 }
7120 zpool_close(zhp);
7121 nvlist_free(config);
7122 nvlist_free(props);
7123
7124 return (ret);
7125 }
7126
7127
7128 /*
7129 * zpool online [-e] [--power] <pool> <device> ...
7130 *
 * -e: Expand the device to use all available space
7131 * --power: Power on the enclosure slot to the drive (if possible)
7132 */
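/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool online tank sda		# bring "sda" back online in "tank"
 *	zpool online -e tank sda	# also expand it to its full size
 */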
7133 int
7134 zpool_do_online(int argc, char **argv)
7135 {
7136 int c, i;
7137 char *poolname;
7138 zpool_handle_t *zhp;
7139 int ret = 0;
7140 vdev_state_t newstate;
7141 int flags = 0;
7142 boolean_t is_power_on = B_FALSE;
7143 struct option long_options[] = {
7144 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7145 {0, 0, 0, 0}
7146 };
7147
7148 /* check options */
7149 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
7150 switch (c) {
7151 case 'e':
7152 flags |= ZFS_ONLINE_EXPAND;
7153 break;
7154 case ZPOOL_OPTION_POWER:
7155 is_power_on = B_TRUE;
7156 break;
7157 case '?':
7158 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7159 optopt);
7160 usage(B_FALSE);
7161 }
7162 }
7163
7164 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7165 is_power_on = B_TRUE;
7166
7167 argc -= optind;
7168 argv += optind;
7169
7170 /* get pool name and check number of arguments */
7171 if (argc < 1) {
7172 (void) fprintf(stderr, gettext("missing pool name\n"));
7173 usage(B_FALSE);
7174 }
7175 if (argc < 2) {
7176 (void) fprintf(stderr, gettext("missing device name\n"));
7177 usage(B_FALSE);
7178 }
7179
7180 poolname = argv[0];
7181
7182 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7183 return (1);
7184
7185 for (i = 1; i < argc; i++) {
7186 vdev_state_t oldstate;
7187 boolean_t avail_spare, l2cache;
7188 int rc;
7189
7190 if (is_power_on) {
7191 rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
7192 if (rc == ENOTSUP) {
7193 (void) fprintf(stderr,
7194 gettext("Power control not supported\n"));
7195 }
7196 if (rc != 0)
7197 return (rc);
7198 }
7199
7200 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
7201 &l2cache, NULL);
7202 if (tgt == NULL) {
7203 ret = 1;
7204 continue;
7205 }
7206 uint_t vsc;
7207 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
7208 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
7209 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
7210 if (newstate != VDEV_STATE_HEALTHY) {
7211 (void) printf(gettext("warning: device '%s' "
7212 "onlined, but remains in faulted state\n"),
7213 argv[i]);
7214 if (newstate == VDEV_STATE_FAULTED)
7215 (void) printf(gettext("use 'zpool "
7216 "clear' to restore a faulted "
7217 "device\n"));
7218 else
7219 (void) printf(gettext("use 'zpool "
7220 "replace' to replace devices "
7221 "that are no longer present\n"));
7222 if ((flags & ZFS_ONLINE_EXPAND)) {
7223 (void) printf(gettext("%s: failed "
7224 "to expand usable space on "
7225 "unhealthy device '%s'\n"),
7226 (oldstate >= VDEV_STATE_DEGRADED ?
7227 "error" : "warning"), argv[i]);
7228 if (oldstate >= VDEV_STATE_DEGRADED) {
7229 ret = 1;
7230 break;
7231 }
7232 }
7233 }
7234 } else {
7235 ret = 1;
7236 }
7237 }
7238
7239 zpool_close(zhp);
7240
7241 return (ret);
7242 }
7243
7244 /*
7245 * zpool offline [-ft]|[--power] <pool> <device> ...
7246 *
7248 * -f Force the device into a faulted state.
7249 *
7250 * -t Only take the device off-line temporarily. The offline/faulted
7251 * state will not be persistent across reboots.
7252 *
7253 * --power Power off the enclosure slot to the drive (if possible)
7254 */
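/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool offline tank sda		# take "sda" offline persistently
 *	zpool offline -t tank sda	# temporary; not persistent across reboot
 */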
7255 int
7256 zpool_do_offline(int argc, char **argv)
7257 {
7258 int c, i;
7259 char *poolname;
7260 zpool_handle_t *zhp;
7261 int ret = 0;
7262 boolean_t istmp = B_FALSE;
7263 boolean_t fault = B_FALSE;
7264 boolean_t is_power_off = B_FALSE;
7265
7266 struct option long_options[] = {
7267 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7268 {0, 0, 0, 0}
7269 };
7270
7271 /* check options */
7272 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
7273 switch (c) {
7274 case 'f':
7275 fault = B_TRUE;
7276 break;
7277 case 't':
7278 istmp = B_TRUE;
7279 break;
7280 case ZPOOL_OPTION_POWER:
7281 is_power_off = B_TRUE;
7282 break;
7283 case '?':
7284 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7285 optopt);
7286 usage(B_FALSE);
7287 }
7288 }
7289
7290 if (is_power_off && fault) {
7291 (void) fprintf(stderr,
7292 gettext("--power and -f cannot be used together\n"));
7293 usage(B_FALSE);
7294 return (1);
7295 }
7296
7297 if (is_power_off && istmp) {
7298 (void) fprintf(stderr,
7299 gettext("--power and -t cannot be used together\n"));
7300 usage(B_FALSE);
7301 return (1);
7302 }
7303
7304 argc -= optind;
7305 argv += optind;
7306
7307 /* get pool name and check number of arguments */
7308 if (argc < 1) {
7309 (void) fprintf(stderr, gettext("missing pool name\n"));
7310 usage(B_FALSE);
7311 }
7312 if (argc < 2) {
7313 (void) fprintf(stderr, gettext("missing device name\n"));
7314 usage(B_FALSE);
7315 }
7316
7317 poolname = argv[0];
7318
7319 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7320 return (1);
7321
7322 for (i = 1; i < argc; i++) {
7323 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
7324 if (is_power_off) {
7325 /*
7326 * Note: we have to power off first, then set REMOVED,
7327 * or else zpool_vdev_set_removed_state() returns
7328 * EAGAIN.
7329 */
7330 ret = zpool_power_off(zhp, argv[i]);
7331 if (ret != 0) {
7332 (void) fprintf(stderr, "%s %s %d\n",
7333 gettext("unable to power off slot for"),
7334 argv[i], ret);
7335 }
7336 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
7337
7338 } else if (fault) {
7339 vdev_aux_t aux;
7340 if (istmp == B_FALSE) {
7341 /* Force the fault to persist across imports */
7342 aux = VDEV_AUX_EXTERNAL_PERSIST;
7343 } else {
7344 aux = VDEV_AUX_EXTERNAL;
7345 }
7346
7347 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
7348 ret = 1;
7349 } else {
7350 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
7351 ret = 1;
7352 }
7353 }
7354
7355 zpool_close(zhp);
7356
7357 return (ret);
7358 }
7359
7360 /*
7361 * zpool clear [-nF]|[--power] <pool> [device]
7362 *
7363 * Clear all errors associated with a pool or a particular device.
7364 */
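/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool clear tank		# clear error counts on every device
 *	zpool clear tank sda		# clear errors on "sda" only
 */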
7365 int
7366 zpool_do_clear(int argc, char **argv)
7367 {
7368 int c;
7369 int ret = 0;
7370 boolean_t dryrun = B_FALSE;
7371 boolean_t do_rewind = B_FALSE;
7372 boolean_t xtreme_rewind = B_FALSE;
7373 boolean_t is_power_on = B_FALSE;
7374 uint32_t rewind_policy = ZPOOL_NO_REWIND;
7375 nvlist_t *policy = NULL;
7376 zpool_handle_t *zhp;
7377 char *pool, *device;
7378
7379 struct option long_options[] = {
7380 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
7381 {0, 0, 0, 0}
7382 };
7383
7384 /* check options */
7385 while ((c = getopt_long(argc, argv, "FnX", long_options,
7386 NULL)) != -1) {
7387 switch (c) {
7388 case 'F':
7389 do_rewind = B_TRUE;
7390 break;
7391 case 'n':
7392 dryrun = B_TRUE;
7393 break;
7394 case 'X':
7395 xtreme_rewind = B_TRUE;
7396 break;
7397 case ZPOOL_OPTION_POWER:
7398 is_power_on = B_TRUE;
7399 break;
7400 case '?':
7401 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7402 optopt);
7403 usage(B_FALSE);
7404 }
7405 }
7406
7407 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
7408 is_power_on = B_TRUE;
7409
7410 argc -= optind;
7411 argv += optind;
7412
7413 if (argc < 1) {
7414 (void) fprintf(stderr, gettext("missing pool name\n"));
7415 usage(B_FALSE);
7416 }
7417
7418 if (argc > 2) {
7419 (void) fprintf(stderr, gettext("too many arguments\n"));
7420 usage(B_FALSE);
7421 }
7422
7423 if ((dryrun || xtreme_rewind) && !do_rewind) {
7424 (void) fprintf(stderr,
7425 gettext("-n or -X only meaningful with -F\n"));
7426 usage(B_FALSE);
7427 }
7428 if (dryrun)
7429 rewind_policy = ZPOOL_TRY_REWIND;
7430 else if (do_rewind)
7431 rewind_policy = ZPOOL_DO_REWIND;
7432 if (xtreme_rewind)
7433 rewind_policy |= ZPOOL_EXTREME_REWIND;
7434
7435 /* In future, further rewind policy choices can be passed along here */
7436 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7437 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7438 rewind_policy) != 0) {
7439 return (1);
7440 }
7441
7442 pool = argv[0];
7443 device = argc == 2 ? argv[1] : NULL;
7444
7445 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7446 nvlist_free(policy);
7447 return (1);
7448 }
7449
7450 if (is_power_on) {
7451 if (device == NULL) {
7452 zpool_power_on_pool_and_wait_for_devices(zhp);
7453 } else {
7454 zpool_power_on_and_disk_wait(zhp, device);
7455 }
7456 }
7457
7458 if (zpool_clear(zhp, device, policy) != 0)
7459 ret = 1;
7460
7461 zpool_close(zhp);
7462
7463 nvlist_free(policy);
7464
7465 return (ret);
7466 }
7467
7468 /*
7469 * zpool reguid <pool>
7470 */
7471 int
7472 zpool_do_reguid(int argc, char **argv)
7473 {
7474 int c;
7475 char *poolname;
7476 zpool_handle_t *zhp;
7477 int ret = 0;
7478
7479 /* check options */
7480 while ((c = getopt(argc, argv, "")) != -1) {
7481 switch (c) {
7482 case '?':
7483 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7484 optopt);
7485 usage(B_FALSE);
7486 }
7487 }
7488
7489 argc -= optind;
7490 argv += optind;
7491
7492 /* get pool name and check number of arguments */
7493 if (argc < 1) {
7494 (void) fprintf(stderr, gettext("missing pool name\n"));
7495 usage(B_FALSE);
7496 }
7497
7498 if (argc > 1) {
7499 (void) fprintf(stderr, gettext("too many arguments\n"));
7500 usage(B_FALSE);
7501 }
7502
7503 poolname = argv[0];
7504 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7505 return (1);
7506
7507 ret = zpool_reguid(zhp);
7508
7509 zpool_close(zhp);
7510 return (ret);
7511 }
7512
7513
7514 /*
7515 * zpool reopen <pool>
7516 *
7517 * Reopen the pool so that the kernel can update the sizes of all vdevs.
7518 */
7519 int
7520 zpool_do_reopen(int argc, char **argv)
7521 {
7522 int c;
7523 int ret = 0;
7524 boolean_t scrub_restart = B_TRUE;
7525
7526 /* check options */
7527 while ((c = getopt(argc, argv, "n")) != -1) {
7528 switch (c) {
7529 case 'n':
7530 scrub_restart = B_FALSE;
7531 break;
7532 case '?':
7533 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7534 optopt);
7535 usage(B_FALSE);
7536 }
7537 }
7538
7539 argc -= optind;
7540 argv += optind;
7541
7542 /* if argc == 0 we will execute zpool_reopen_one on all pools */
7543 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7544 B_FALSE, zpool_reopen_one, &scrub_restart);
7545
7546 return (ret);
7547 }
7548
7549 typedef struct scrub_cbdata {
7550 int cb_type;
7551 pool_scrub_cmd_t cb_scrub_cmd;
7552 } scrub_cbdata_t;
7553
7554 static boolean_t
7555 zpool_has_checkpoint(zpool_handle_t *zhp)
7556 {
7557 nvlist_t *config, *nvroot;
7558
7559 config = zpool_get_config(zhp, NULL);
7560
7561 if (config != NULL) {
7562 pool_checkpoint_stat_t *pcs = NULL;
7563 uint_t c;
7564
7565 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7566 (void) nvlist_lookup_uint64_array(nvroot,
7567 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7568
7569 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7570 return (B_FALSE);
7571
7572 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7573 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7574 return (B_TRUE);
7575 }
7576
7577 return (B_FALSE);
7578 }
7579
7580 static int
7581 scrub_callback(zpool_handle_t *zhp, void *data)
7582 {
7583 scrub_cbdata_t *cb = data;
7584 int err;
7585
7586 /*
7587 * Ignore faulted pools.
7588 */
7589 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7590 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7591 "currently unavailable\n"), zpool_get_name(zhp));
7592 return (1);
7593 }
7594
7595 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7596
7597 if (err == 0 && zpool_has_checkpoint(zhp) &&
7598 cb->cb_type == POOL_SCAN_SCRUB) {
7599 (void) printf(gettext("warning: will not scrub state that "
7600 "belongs to the checkpoint of pool '%s'\n"),
7601 zpool_get_name(zhp));
7602 }
7603
7604 return (err != 0);
7605 }
7606
7607 static int
7608 wait_callback(zpool_handle_t *zhp, void *data)
7609 {
7610 zpool_wait_activity_t *act = data;
7611 return (zpool_wait(zhp, *act));
7612 }
7613
7614 /*
7615 * zpool scrub [-s | -p] [-w] [-e] <pool> ...
7616 *
7617 * -e Only scrub blocks in the error log.
7618 * -s Stop. Stops any in-progress scrub.
7619 * -p Pause. Pause in-progress scrub.
7620 * -w Wait. Blocks until scrub has completed.
7621 */
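/*
 * Illustrative invocations (an editor's sketch; "tank" is a placeholder):
 *
 *	zpool scrub tank		# start (or resume) a scrub
 *	zpool scrub -p tank		# pause an in-progress scrub
 *	zpool scrub -w tank		# start a scrub and block until done
 */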
7622 int
7623 zpool_do_scrub(int argc, char **argv)
7624 {
7625 int c;
7626 scrub_cbdata_t cb;
7627 boolean_t wait = B_FALSE;
7628 int error;
7629
7630 cb.cb_type = POOL_SCAN_SCRUB;
7631 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7632
7633 boolean_t is_error_scrub = B_FALSE;
7634 boolean_t is_pause = B_FALSE;
7635 boolean_t is_stop = B_FALSE;
7636
7637 /* check options */
7638 while ((c = getopt(argc, argv, "spwe")) != -1) {
7639 switch (c) {
7640 case 'e':
7641 is_error_scrub = B_TRUE;
7642 break;
7643 case 's':
7644 is_stop = B_TRUE;
7645 break;
7646 case 'p':
7647 is_pause = B_TRUE;
7648 break;
7649 case 'w':
7650 wait = B_TRUE;
7651 break;
7652 case '?':
7653 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7654 optopt);
7655 usage(B_FALSE);
7656 }
7657 }
7658
7659 if (is_pause && is_stop) {
7660 (void) fprintf(stderr, gettext("invalid option "
7661 "combination: -s and -p are mutually exclusive\n"));
7662 usage(B_FALSE);
7663 } else {
7664 if (is_error_scrub)
7665 cb.cb_type = POOL_SCAN_ERRORSCRUB;
7666
7667 if (is_pause) {
7668 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7669 } else if (is_stop) {
7670 cb.cb_type = POOL_SCAN_NONE;
7671 } else {
7672 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7673 }
7674 }
7675
7676 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7677 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7678 (void) fprintf(stderr, gettext("invalid option combination: "
7679 "-w cannot be used with -p or -s\n"));
7680 usage(B_FALSE);
7681 }
7682
7683 argc -= optind;
7684 argv += optind;
7685
7686 if (argc < 1) {
7687 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7688 usage(B_FALSE);
7689 }
7690
7691 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7692 B_FALSE, scrub_callback, &cb);
7693
7694 if (wait && !error) {
7695 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7696 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7697 B_FALSE, wait_callback, &act);
7698 }
7699
7700 return (error);
7701 }
7702
7703 /*
7704 * zpool resilver <pool> ...
7705 *
7706 * Restarts any in-progress resilver
7707 */
7708 int
7709 zpool_do_resilver(int argc, char **argv)
7710 {
7711 int c;
7712 scrub_cbdata_t cb;
7713
7714 cb.cb_type = POOL_SCAN_RESILVER;
7715 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7716
7717 /* check options */
7718 while ((c = getopt(argc, argv, "")) != -1) {
7719 switch (c) {
7720 case '?':
7721 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7722 optopt);
7723 usage(B_FALSE);
7724 }
7725 }
7726
7727 argc -= optind;
7728 argv += optind;
7729
7730 if (argc < 1) {
7731 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7732 usage(B_FALSE);
7733 }
7734
7735 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7736 B_FALSE, scrub_callback, &cb));
7737 }
7738
7739 /*
7740 * zpool trim [-dw] [-r <rate>] [-c | -s] <pool> [<device> ...]
7741 *
7742 * -c Cancel. Ends any in-progress trim.
7743 * -d Secure trim. Requires kernel and device support.
7744 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
7745 * adding a multiplier suffix such as 'k' or 'm'.
7746 * -s Suspend. TRIM can then be restarted with no flags.
7747 * -w Wait. Blocks until trimming has completed.
7748 */
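/*
 * Illustrative invocations (an editor's sketch; names are placeholders):
 *
 *	zpool trim tank			# start trimming every leaf vdev
 *	zpool trim -r 500M tank sda	# trim only "sda" at roughly 500 MB/s
 *	zpool trim -w tank		# trim everything and wait for completion
 */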
7749 int
7750 zpool_do_trim(int argc, char **argv)
7751 {
7752 struct option long_options[] = {
7753 {"cancel", no_argument, NULL, 'c'},
7754 {"secure", no_argument, NULL, 'd'},
7755 {"rate", required_argument, NULL, 'r'},
7756 {"suspend", no_argument, NULL, 's'},
7757 {"wait", no_argument, NULL, 'w'},
7758 {0, 0, 0, 0}
7759 };
7760
7761 pool_trim_func_t cmd_type = POOL_TRIM_START;
7762 uint64_t rate = 0;
7763 boolean_t secure = B_FALSE;
7764 boolean_t wait = B_FALSE;
7765
7766 int c;
7767 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7768 != -1) {
7769 switch (c) {
7770 case 'c':
7771 if (cmd_type != POOL_TRIM_START &&
7772 cmd_type != POOL_TRIM_CANCEL) {
7773 (void) fprintf(stderr, gettext("-c cannot be "
7774 "combined with other options\n"));
7775 usage(B_FALSE);
7776 }
7777 cmd_type = POOL_TRIM_CANCEL;
7778 break;
7779 case 'd':
7780 if (cmd_type != POOL_TRIM_START) {
7781 (void) fprintf(stderr, gettext("-d cannot be "
7782 "combined with the -c or -s options\n"));
7783 usage(B_FALSE);
7784 }
7785 secure = B_TRUE;
7786 break;
7787 case 'r':
7788 if (cmd_type != POOL_TRIM_START) {
7789 (void) fprintf(stderr, gettext("-r cannot be "
7790 "combined with the -c or -s options\n"));
7791 usage(B_FALSE);
7792 }
7793 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7794 (void) fprintf(stderr, "%s: %s\n",
7795 gettext("invalid value for rate"),
7796 libzfs_error_description(g_zfs));
7797 usage(B_FALSE);
7798 }
7799 break;
7800 case 's':
7801 if (cmd_type != POOL_TRIM_START &&
7802 cmd_type != POOL_TRIM_SUSPEND) {
7803 (void) fprintf(stderr, gettext("-s cannot be "
7804 "combined with other options\n"));
7805 usage(B_FALSE);
7806 }
7807 cmd_type = POOL_TRIM_SUSPEND;
7808 break;
7809 case 'w':
7810 wait = B_TRUE;
7811 break;
7812 case '?':
7813 if (optopt != 0) {
7814 (void) fprintf(stderr,
7815 gettext("invalid option '%c'\n"), optopt);
7816 } else {
7817 (void) fprintf(stderr,
7818 gettext("invalid option '%s'\n"),
7819 argv[optind - 1]);
7820 }
7821 usage(B_FALSE);
7822 }
7823 }
7824
7825 argc -= optind;
7826 argv += optind;
7827
7828 if (argc < 1) {
7829 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7830 usage(B_FALSE);
7831 return (-1);
7832 }
7833
7834 if (wait && (cmd_type != POOL_TRIM_START)) {
7835 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
7836 "-s\n"));
7837 usage(B_FALSE);
7838 }
7839
7840 char *poolname = argv[0];
7841 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7842 if (zhp == NULL)
7843 return (-1);
7844
7845 trimflags_t trim_flags = {
7846 .secure = secure,
7847 .rate = rate,
7848 .wait = wait,
7849 };
7850
7851 nvlist_t *vdevs = fnvlist_alloc();
7852 if (argc == 1) {
7853 /* no individual leaf vdevs specified, so add them all */
7854 nvlist_t *config = zpool_get_config(zhp, NULL);
7855 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7856 ZPOOL_CONFIG_VDEV_TREE);
7857 zpool_collect_leaves(zhp, nvroot, vdevs);
7858 trim_flags.fullpool = B_TRUE;
7859 } else {
7860 trim_flags.fullpool = B_FALSE;
7861 for (int i = 1; i < argc; i++) {
7862 fnvlist_add_boolean(vdevs, argv[i]);
7863 }
7864 }
7865
7866 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7867
7868 fnvlist_free(vdevs);
7869 zpool_close(zhp);
7870
7871 return (error);
7872 }
7873
7874 /*
7875 * Converts a total number of seconds to a human readable string broken
7876 * down into days/hours/minutes/seconds.
7877 */
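/*
 * For example (illustrative): secs_to_dhms(93784, buf) yields
 * "1 days 02:03:04", while secs_to_dhms(3723, buf) yields "01:02:03".
 */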
7878 static void
7879 secs_to_dhms(uint64_t total, char *buf)
7880 {
7881 uint64_t days = total / 60 / 60 / 24;
7882 uint64_t hours = (total / 60 / 60) % 24;
7883 uint64_t mins = (total / 60) % 60;
7884 uint64_t secs = (total % 60);
7885
7886 if (days > 0) {
7887 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7888 (u_longlong_t)days, (u_longlong_t)hours,
7889 (u_longlong_t)mins, (u_longlong_t)secs);
7890 } else {
7891 (void) sprintf(buf, "%02llu:%02llu:%02llu",
7892 (u_longlong_t)hours, (u_longlong_t)mins,
7893 (u_longlong_t)secs);
7894 }
7895 }
7896
7897 /*
7898 * Print out detailed error scrub status.
7899 */
7900 static void
7901 print_err_scrub_status(pool_scan_stat_t *ps)
7902 {
7903 time_t start, end, pause;
7904 uint64_t total_secs_left;
7905 uint64_t secs_left, mins_left, hours_left, days_left;
7906 uint64_t examined, to_be_examined;
7907
7908 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
7909 return;
7910 }
7911
7912 (void) printf(gettext(" scrub: "));
7913
7914 start = ps->pss_error_scrub_start;
7915 end = ps->pss_error_scrub_end;
7916 pause = ps->pss_pass_error_scrub_pause;
7917 examined = ps->pss_error_scrub_examined;
7918 to_be_examined = ps->pss_error_scrub_to_be_examined;
7919
7920 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);
7921
7922 if (ps->pss_error_scrub_state == DSS_FINISHED) {
7923 total_secs_left = end - start;
7924 days_left = total_secs_left / 60 / 60 / 24;
7925 hours_left = (total_secs_left / 60 / 60) % 24;
7926 mins_left = (total_secs_left / 60) % 60;
7927 secs_left = (total_secs_left % 60);
7928
7929 (void) printf(gettext("scrubbed %llu error blocks in %llu days "
7930 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
7931 (u_longlong_t)days_left, (u_longlong_t)hours_left,
7932 (u_longlong_t)mins_left, (u_longlong_t)secs_left,
7933 ctime(&end));
7934
7935 return;
7936 } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
7937 (void) printf(gettext("error scrub canceled on %s"),
7938 ctime(&end));
7939 return;
7940 }
7941 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);
7942
7943 /* Error scrub is in progress. */
7944 if (pause == 0) {
7945 (void) printf(gettext("error scrub in progress since %s"),
7946 ctime(&start));
7947 } else {
7948 (void) printf(gettext("error scrub paused since %s"),
7949 ctime(&pause));
7950 (void) printf(gettext("\terror scrub started on %s"),
7951 ctime(&start));
7952 }
7953
7954 double fraction_done = (double)examined / (to_be_examined + examined);
7955 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
7956 " blocks"), 100 * fraction_done, (u_longlong_t)examined);
7957
7958 (void) printf("\n");
7959 }
7960
7961 /*
7962 * Print out detailed scrub status.
7963 */
7964 static void
7965 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7966 {
7967 time_t start, end, pause;
7968 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
7969 uint64_t elapsed, scan_rate, issue_rate;
7970 double fraction_done;
7971 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
7972 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
7973
7974 printf(" ");
7975 printf_color(ANSI_BOLD, gettext("scan:"));
7976 printf(" ");
7977
7978 /* If there's never been a scan, there's not much to say. */
7979 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7980 ps->pss_func >= POOL_SCAN_FUNCS) {
7981 (void) printf(gettext("none requested\n"));
7982 return;
7983 }
7984
7985 start = ps->pss_start_time;
7986 end = ps->pss_end_time;
7987 pause = ps->pss_pass_scrub_pause;
7988
7989 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7990
7991 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
7992 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
7993 assert(is_resilver || is_scrub);
7994
7995 /* Scan is finished or canceled. */
7996 if (ps->pss_state == DSS_FINISHED) {
7997 secs_to_dhms(end - start, time_buf);
7998
7999 if (is_scrub) {
8000 (void) printf(gettext("scrub repaired %s "
8001 "in %s with %llu errors on %s"), processed_buf,
8002 time_buf, (u_longlong_t)ps->pss_errors,
8003 ctime(&end));
8004 } else if (is_resilver) {
8005 (void) printf(gettext("resilvered %s "
8006 "in %s with %llu errors on %s"), processed_buf,
8007 time_buf, (u_longlong_t)ps->pss_errors,
8008 ctime(&end));
8009 }
8010 return;
8011 } else if (ps->pss_state == DSS_CANCELED) {
8012 if (is_scrub) {
8013 (void) printf(gettext("scrub canceled on %s"),
8014 ctime(&end));
8015 } else if (is_resilver) {
8016 (void) printf(gettext("resilver canceled on %s"),
8017 ctime(&end));
8018 }
8019 return;
8020 }
8021
8022 assert(ps->pss_state == DSS_SCANNING);
8023
8024 /* Scan is in progress. Resilvers can't be paused. */
8025 if (is_scrub) {
8026 if (pause == 0) {
8027 (void) printf(gettext("scrub in progress since %s"),
8028 ctime(&start));
8029 } else {
8030 (void) printf(gettext("scrub paused since %s"),
8031 ctime(&pause));
8032 (void) printf(gettext("\tscrub started on %s"),
8033 ctime(&start));
8034 }
8035 } else if (is_resilver) {
8036 (void) printf(gettext("resilver in progress since %s"),
8037 ctime(&start));
8038 }
8039
8040 scanned = ps->pss_examined;
8041 pass_scanned = ps->pss_pass_exam;
8042 issued = ps->pss_issued;
8043 pass_issued = ps->pss_pass_issued;
8044 total_s = ps->pss_to_examine;
8045 total_i = ps->pss_to_examine - ps->pss_skipped;
8046
8047 /* we are only done with a block once we have issued the IO for it */
8048 fraction_done = (double)issued / total_i;
8049
8050 /* elapsed time for this pass, rounding up to 1 if it's 0 */
8051 elapsed = time(NULL) - ps->pss_pass_start;
8052 elapsed -= ps->pss_pass_scrub_spent_paused;
8053 elapsed = (elapsed != 0) ? elapsed : 1;
8054
8055 scan_rate = pass_scanned / elapsed;
8056 issue_rate = pass_issued / elapsed;
8057
8058 /* format all of the numbers we will be reporting */
8059 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
8060 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
8061 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
8062 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));
8063
8064 /* do not print estimated time if we have a paused scrub */
8065 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
8066 if (pause == 0 && scan_rate > 0) {
8067 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
8068 (void) printf(gettext(" at %s/s"), srate_buf);
8069 }
8070 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
8071 if (pause == 0 && issue_rate > 0) {
8072 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
8073 (void) printf(gettext(" at %s/s"), irate_buf);
8074 }
8075 (void) printf(gettext("\n"));
8076
8077 if (is_resilver) {
8078 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8079 processed_buf, 100 * fraction_done);
8080 } else if (is_scrub) {
8081 (void) printf(gettext("\t%s repaired, %.2f%% done"),
8082 processed_buf, 100 * fraction_done);
8083 }
8084
8085 if (pause == 0) {
8086 /*
8087 * Only provide an estimate iff:
8088 * 1) we haven't yet issued all we expected, and
8089 * 2) the issue rate exceeds 10 MB/s, and
8090 * 3) it's either:
8091 * a) a resilver which has started repairs, or
8092 * b) a scrub which has entered the issue phase.
8093 */
8094 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
8095 ((is_resilver && ps->pss_processed > 0) ||
8096 (is_scrub && issued > 0))) {
8097 secs_to_dhms((total_i - issued) / issue_rate, time_buf);
8098 (void) printf(gettext(", %s to go\n"), time_buf);
8099 } else {
8100 (void) printf(gettext(", no estimated "
8101 "completion time\n"));
8102 }
8103 } else {
8104 (void) printf(gettext("\n"));
8105 }
8106 }
8107
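/*
 * Print the sequential resilver (rebuild) status for a single top-level
 * vdev, in the same style as the scrub/resilver "scan:" output.
 */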
8108 static void
8109 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
8110 {
8111 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
8112 return;
8113
8114 printf(" ");
8115 printf_color(ANSI_BOLD, gettext("scan:"));
8116 printf(" ");
8117
8118 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
8119 uint64_t bytes_issued = vrs->vrs_bytes_issued;
8120 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
8121 uint64_t bytes_est_s = vrs->vrs_bytes_est;
8122 uint64_t bytes_est_i = vrs->vrs_bytes_est;
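/*
 * 'c' is the length (in uint64_t words) of the rebuild stats array; only
 * read vrs_pass_bytes_skipped when the array is long enough to contain it
 * (an older kernel module may not provide the field).
 */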
8123 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
8124 bytes_est_i -= vrs->vrs_pass_bytes_skipped;
8125 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
8126 (vrs->vrs_pass_time_ms + 1)) * 1000;
8127 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
8128 (vrs->vrs_pass_time_ms + 1)) * 1000;
8129 double scan_pct = MIN((double)bytes_scanned * 100 /
8130 (bytes_est_s + 1), 100);
8131
8132 /* Format all of the numbers we will be reporting */
8133 char bytes_scanned_buf[7], bytes_issued_buf[7];
8134 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
8135 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
8136 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
8137 sizeof (bytes_scanned_buf));
8138 zfs_nicebytes(bytes_issued, bytes_issued_buf,
8139 sizeof (bytes_issued_buf));
8140 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
8141 sizeof (bytes_rebuilt_buf));
8142 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
8143 zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));
8144
8145 time_t start = vrs->vrs_start_time;
8146 time_t end = vrs->vrs_end_time;
8147
8148 /* Rebuild is finished or canceled. */
8149 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
8150 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
8151 (void) printf(gettext("resilvered (%s) %s in %s "
8152 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
8153 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
8154 return;
8155 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
8156 (void) printf(gettext("resilver (%s) canceled on %s"),
8157 vdev_name, ctime(&end));
8158 return;
8159 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8160 (void) printf(gettext("resilver (%s) in progress since %s"),
8161 vdev_name, ctime(&start));
8162 }
8163
8164 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
8165
8166 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
8167 bytes_est_s_buf);
8168 if (scan_rate > 0) {
8169 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
8170 (void) printf(gettext(" at %s/s"), scan_rate_buf);
8171 }
8172 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
8173 bytes_est_i_buf);
8174 if (issue_rate > 0) {
8175 zfs_nicebytes(issue_rate, issue_rate_buf,
8176 sizeof (issue_rate_buf));
8177 (void) printf(gettext(" at %s/s"), issue_rate_buf);
8178 }
8179 (void) printf(gettext("\n"));
8180
8181 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
8182 bytes_rebuilt_buf, scan_pct);
8183
8184 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8185 if (bytes_est_s >= bytes_scanned &&
8186 scan_rate >= 10 * 1024 * 1024) {
8187 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
8188 time_buf);
8189 (void) printf(gettext(", %s to go\n"), time_buf);
8190 } else {
8191 (void) printf(gettext(", no estimated "
8192 "completion time\n"));
8193 }
8194 } else {
8195 (void) printf(gettext("\n"));
8196 }
8197 }
8198
8199 /*
8200 * Print rebuild status for top-level vdevs.
8201 */
8202 static void
8203 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8204 {
8205 nvlist_t **child;
8206 uint_t children;
8207
8208 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8209 &child, &children) != 0)
8210 children = 0;
8211
8212 for (uint_t c = 0; c < children; c++) {
8213 vdev_rebuild_stat_t *vrs;
8214 uint_t i;
8215
8216 if (nvlist_lookup_uint64_array(child[c],
8217 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8218 char *name = zpool_vdev_name(g_zfs, zhp,
8219 child[c], VDEV_NAME_TYPE_ID);
8220 print_rebuild_status_impl(vrs, i, name);
8221 free(name);
8222 }
8223 }
8224 }
8225
8226 /*
8227 * As we don't scrub checkpointed blocks, we want to warn the user that we
8228 * skipped scanning some blocks if a checkpoint exists or existed at any
8229 * time during the scan. If a sequential instead of healing reconstruction
8230 * was performed then the blocks were reconstructed. However, their checksums
8231 * have not been verified so we still print the warning.
8232 */
8233 static void
8234 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
8235 {
8236 if (ps == NULL || pcs == NULL)
8237 return;
8238
8239 if (pcs->pcs_state == CS_NONE ||
8240 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
8241 return;
8242
8243 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
8244
8245 if (ps->pss_state == DSS_NONE)
8246 return;
8247
8248 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
8249 ps->pss_end_time < pcs->pcs_start_time)
8250 return;
8251
8252 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
8253 (void) printf(gettext(" scan warning: skipped blocks "
8254 "that are only referenced by the checkpoint.\n"));
8255 } else {
8256 assert(ps->pss_state == DSS_SCANNING);
8257 (void) printf(gettext(" scan warning: skipping blocks "
8258 "that are only referenced by the checkpoint.\n"));
8259 }
8260 }
8261
8262 /*
8263 * Returns B_TRUE if there is an active rebuild in progress, in which case
8264 * 'rebuild_end_time' is set to 0. Otherwise, B_FALSE is returned and
8265 * 'rebuild_end_time' is set to the end time of the last completed (or canceled) rebuild.
8266 */
8267 static boolean_t
8268 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
8269 {
8270 nvlist_t **child;
8271 uint_t children;
8272 boolean_t rebuilding = B_FALSE;
8273 uint64_t end_time = 0;
8274
8275 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8276 &child, &children) != 0)
8277 children = 0;
8278
8279 for (uint_t c = 0; c < children; c++) {
8280 vdev_rebuild_stat_t *vrs;
8281 uint_t i;
8282
8283 if (nvlist_lookup_uint64_array(child[c],
8284 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
8285
8286 if (vrs->vrs_end_time > end_time)
8287 end_time = vrs->vrs_end_time;
8288
8289 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
8290 rebuilding = B_TRUE;
8291 end_time = 0;
8292 break;
8293 }
8294 }
8295 }
8296
8297 if (rebuild_end_time != NULL)
8298 *rebuild_end_time = end_time;
8299
8300 return (rebuilding);
8301 }
8302
8303 /*
8304 * Print the scan status.
8305 */
8306 static void
8307 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
8308 {
8309 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
8310 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
8311 boolean_t have_errorscrub = B_FALSE;
8312 boolean_t active_resilver = B_FALSE;
8313 pool_checkpoint_stat_t *pcs = NULL;
8314 pool_scan_stat_t *ps = NULL;
8315 uint_t c;
8316 time_t scrub_start = 0, errorscrub_start = 0;
8317
8318 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
8319 (uint64_t **)&ps, &c) == 0) {
8320 if (ps->pss_func == POOL_SCAN_RESILVER) {
8321 resilver_end_time = ps->pss_end_time;
8322 active_resilver = (ps->pss_state == DSS_SCANNING);
8323 }
8324
8325 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
8326 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
8327 scrub_start = ps->pss_start_time;
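/*
 * The error scrub fields were appended to pool_scan_stat_t; only read
 * them when the returned stats array is long enough to include them.
 */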
8328 if (c > offsetof(pool_scan_stat_t,
8329 pss_pass_error_scrub_pause) / 8) {
8330 have_errorscrub = (ps->pss_error_scrub_func ==
8331 POOL_SCAN_ERRORSCRUB);
8332 errorscrub_start = ps->pss_error_scrub_start;
8333 }
8334 }
8335
8336 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
8337 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
8338
8339 /* Always print the scrub status when available. */
8340 if (have_scrub && scrub_start > errorscrub_start)
8341 print_scan_scrub_resilver_status(ps);
8342 else if (have_errorscrub && errorscrub_start >= scrub_start)
8343 print_err_scrub_status(ps);
8344
8345 /*
8346 * When there is an active resilver or rebuild print its status.
8347 * Otherwise print the status of the last resilver or rebuild.
8348 */
8349 if (active_resilver || (!active_rebuild && have_resilver &&
8350 resilver_end_time && resilver_end_time > rebuild_end_time)) {
8351 print_scan_scrub_resilver_status(ps);
8352 } else if (active_rebuild || (!active_resilver && have_rebuild &&
8353 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
8354 print_rebuild_status(zhp, nvroot);
8355 }
8356
8357 (void) nvlist_lookup_uint64_array(nvroot,
8358 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8359 print_checkpoint_scan_warning(ps, pcs);
8360 }
8361
8362 /*
8363 * Print out detailed removal status.
8364 */
8365 static void
8366 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
8367 {
8368 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
8369 time_t start, end;
8370 nvlist_t *config, *nvroot;
8371 nvlist_t **child;
8372 uint_t children;
8373 char *vdev_name;
8374
8375 if (prs == NULL || prs->prs_state == DSS_NONE)
8376 return;
8377
8378 /*
8379 * Determine name of vdev.
8380 */
8381 config = zpool_get_config(zhp, NULL);
8382 nvroot = fnvlist_lookup_nvlist(config,
8383 ZPOOL_CONFIG_VDEV_TREE);
8384 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8385 &child, &children) == 0);
8386 assert(prs->prs_removing_vdev < children);
8387 vdev_name = zpool_vdev_name(g_zfs, zhp,
8388 child[prs->prs_removing_vdev], B_TRUE);
8389
8390 printf_color(ANSI_BOLD, gettext("remove: "));
8391
8392 start = prs->prs_start_time;
8393 end = prs->prs_end_time;
8394 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
8395
8396 /*
8397 * Removal is finished or canceled.
8398 */
8399 if (prs->prs_state == DSS_FINISHED) {
8400 uint64_t minutes_taken = (end - start) / 60;
8401
8402 (void) printf(gettext("Removal of vdev %llu copied %s "
8403 "in %lluh%um, completed on %s"),
8404 (longlong_t)prs->prs_removing_vdev,
8405 copied_buf,
8406 (u_longlong_t)(minutes_taken / 60),
8407 (uint_t)(minutes_taken % 60),
8408 ctime((time_t *)&end));
8409 } else if (prs->prs_state == DSS_CANCELED) {
8410 (void) printf(gettext("Removal of %s canceled on %s"),
8411 vdev_name, ctime(&end));
8412 } else {
8413 uint64_t copied, total, elapsed, mins_left, hours_left;
8414 double fraction_done;
8415 uint_t rate;
8416
8417 assert(prs->prs_state == DSS_SCANNING);
8418
8419 /*
8420 * Removal is in progress.
8421 */
8422 (void) printf(gettext(
8423 "Evacuation of %s in progress since %s"),
8424 vdev_name, ctime(&start));
8425
8426 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
8427 total = prs->prs_to_copy;
8428 fraction_done = (double)copied / total;
8429
8430 /* elapsed time for this pass */
8431 elapsed = time(NULL) - prs->prs_start_time;
8432 elapsed = elapsed > 0 ? elapsed : 1;
8433 rate = copied / elapsed;
8434 rate = rate > 0 ? rate : 1;
8435 mins_left = ((total - copied) / rate) / 60;
8436 hours_left = mins_left / 60;
8437
8438 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8439 zfs_nicenum(total, total_buf, sizeof (total_buf));
8440 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8441
8442 /*
8443 * do not print estimated time if hours_left is more than
8444 * 30 days
8445 */
8446 (void) printf(gettext(
8447 "\t%s copied out of %s at %s/s, %.2f%% done"),
8448 examined_buf, total_buf, rate_buf, 100 * fraction_done);
8449 if (hours_left < (30 * 24)) {
8450 (void) printf(gettext(", %lluh%um to go\n"),
8451 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
8452 } else {
8453 (void) printf(gettext(
8454 ", (copy is slow, no estimated time)\n"));
8455 }
8456 }
8457 free(vdev_name);
8458
8459 if (prs->prs_mapping_memory > 0) {
8460 char mem_buf[7];
8461 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
8462 (void) printf(gettext(
8463 "\t%s memory used for removed device mappings\n"),
8464 mem_buf);
8465 }
8466 }
8467
8468 /*
8469 * Print out detailed raidz expansion status.
8470 */
8471 static void
8472 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
8473 {
8474 char copied_buf[7];
8475
8476 if (pres == NULL || pres->pres_state == DSS_NONE)
8477 return;
8478
8479 /*
8480 * Determine name of vdev.
8481 */
8482 nvlist_t *config = zpool_get_config(zhp, NULL);
8483 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
8484 ZPOOL_CONFIG_VDEV_TREE);
8485 nvlist_t **child;
8486 uint_t children;
8487 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
8488 &child, &children) == 0);
8489 assert(pres->pres_expanding_vdev < children);
8490
8491 printf_color(ANSI_BOLD, gettext("expand: "));
8492
8493 time_t start = pres->pres_start_time;
8494 time_t end = pres->pres_end_time;
8495 char *vname =
8496 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
8497 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
8498
8499 /*
8500 * Expansion is finished or canceled.
8501 */
8502 if (pres->pres_state == DSS_FINISHED) {
8503 char time_buf[32];
8504 secs_to_dhms(end - start, time_buf);
8505
8506 (void) printf(gettext("expanded %s-%u copied %s in %s, "
8507 "on %s"), vname, (int)pres->pres_expanding_vdev,
8508 copied_buf, time_buf, ctime((time_t *)&end));
8509 } else {
8510 char examined_buf[7], total_buf[7], rate_buf[7];
8511 uint64_t copied, total, elapsed, secs_left;
8512 double fraction_done;
8513 uint_t rate;
8514
8515 assert(pres->pres_state == DSS_SCANNING);
8516
8517 /*
8518 * Expansion is in progress.
8519 */
8520 (void) printf(gettext(
8521 "expansion of %s-%u in progress since %s"),
8522 vname, (int)pres->pres_expanding_vdev, ctime(&start));
8523
8524 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
8525 total = pres->pres_to_reflow;
8526 fraction_done = (double)copied / total;
8527
8528 /* elapsed time for this pass */
8529 elapsed = time(NULL) - pres->pres_start_time;
8530 elapsed = elapsed > 0 ? elapsed : 1;
8531 rate = copied / elapsed;
8532 rate = rate > 0 ? rate : 1;
8533 secs_left = (total - copied) / rate;
8534
8535 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
8536 zfs_nicenum(total, total_buf, sizeof (total_buf));
8537 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
8538
8539 /*
8540 * do not print the estimated time if it is more than
8541 * 30 days
8542 */
8543 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
8544 examined_buf, total_buf, rate_buf, 100 * fraction_done);
8545 if (pres->pres_waiting_for_resilver) {
8546 (void) printf(gettext(", paused for resilver or "
8547 "clear\n"));
8548 } else if (secs_left < (30 * 24 * 3600)) {
8549 char time_buf[32];
8550 secs_to_dhms(secs_left, time_buf);
8551 (void) printf(gettext(", %s to go\n"), time_buf);
8552 } else {
8553 (void) printf(gettext(
8554 ", (copy is slow, no estimated time)\n"));
8555 }
8556 }
8557 free(vname);
8558 }
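
/*
 * Print the pool checkpoint status, e.g. (values illustrative):
 *
 *   checkpoint: created Sat Jan  6 10:00:00 2024, consumes 12.3G
 */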
8559 static void
8560 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
8561 {
8562 time_t start;
8563 char space_buf[7];
8564
8565 if (pcs == NULL || pcs->pcs_state == CS_NONE)
8566 return;
8567
8568 (void) printf(gettext("checkpoint: "));
8569
8570 start = pcs->pcs_start_time;
8571 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
8572
8573 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
8574 char *date = ctime(&start);
8575
8576 /*
8577 * ctime() adds a newline at the end of the generated
8578 * string, thus the weird format specifier and the
8579 * strlen() call used to chop it off from the output.
8580 */
8581 (void) printf(gettext("created %.*s, consumes %s\n"),
8582 (int)(strlen(date) - 1), date, space_buf);
8583 return;
8584 }
8585
8586 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
8587
8588 (void) printf(gettext("discarding, %s remaining.\n"),
8589 space_buf);
8590 }
8591
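/*
 * Print the list of files with permanent errors, resolving each
 * <dataset, object> pair from the pool's persistent error log to a path.
 */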
8592 static void
8593 print_error_log(zpool_handle_t *zhp)
8594 {
8595 nvlist_t *nverrlist = NULL;
8596 nvpair_t *elem;
8597 char *pathname;
8598 size_t len = MAXPATHLEN * 2;
8599
8600 if (zpool_get_errlog(zhp, &nverrlist) != 0)
8601 return;
8602
8603 (void) printf("errors: Permanent errors have been "
8604 "detected in the following files:\n\n");
8605
8606 pathname = safe_malloc(len);
8607 elem = NULL;
8608 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8609 nvlist_t *nv;
8610 uint64_t dsobj, obj;
8611
8612 verify(nvpair_value_nvlist(elem, &nv) == 0);
8613 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8614 &dsobj) == 0);
8615 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8616 &obj) == 0);
8617 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8618 (void) printf("%7s %s\n", "", pathname);
8619 }
8620 free(pathname);
8621 nvlist_free(nverrlist);
8622 }
8623
8624 static void
8625 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8626 uint_t nspares)
8627 {
8628 uint_t i;
8629 char *name;
8630
8631 if (nspares == 0)
8632 return;
8633
8634 (void) printf(gettext("\tspares\n"));
8635
8636 for (i = 0; i < nspares; i++) {
8637 name = zpool_vdev_name(g_zfs, zhp, spares[i],
8638 cb->cb_name_flags);
8639 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8640 free(name);
8641 }
8642 }
8643
8644 static void
8645 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8646 uint_t nl2cache)
8647 {
8648 uint_t i;
8649 char *name;
8650
8651 if (nl2cache == 0)
8652 return;
8653
8654 (void) printf(gettext("\tcache\n"));
8655
8656 for (i = 0; i < nl2cache; i++) {
8657 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8658 cb->cb_name_flags);
8659 print_status_config(zhp, cb, name, l2cache[i], 2,
8660 B_FALSE, NULL);
8661 free(name);
8662 }
8663 }
8664
8665 static void
8666 print_dedup_stats(nvlist_t *config)
8667 {
8668 ddt_histogram_t *ddh;
8669 ddt_stat_t *dds;
8670 ddt_object_t *ddo;
8671 uint_t c;
8672 char dspace[6], mspace[6];
8673
8674 /*
8675 * If the pool was faulted then we may not have been able to
8676 * obtain the config. Otherwise, if we have anything in the dedup
8677 * table continue processing the stats.
8678 */
8679 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8680 (uint64_t **)&ddo, &c) != 0)
8681 return;
8682
8683 (void) printf("\n");
8684 (void) printf(gettext(" dedup: "));
8685 if (ddo->ddo_count == 0) {
8686 (void) printf(gettext("no DDT entries\n"));
8687 return;
8688 }
8689
8690 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8691 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8692 (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8693 (u_longlong_t)ddo->ddo_count,
8694 dspace,
8695 mspace);
8696
8697 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8698 (uint64_t **)&dds, &c) == 0);
8699 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8700 (uint64_t **)&ddh, &c) == 0);
8701 zpool_dump_ddt(dds, ddh);
8702 }
8703
8704 /*
8705 * Display a summary of pool status, such as:
8706 *
8707 * pool: tank
8708 * status: DEGRADED
8709 * reason: One or more devices ...
8710 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8711 * config:
8712 * mirror DEGRADED
8713 * c1t0d0 OK
8714 * c2t0d0 UNAVAIL
8715 *
8716 * When given the '-v' option, we print out the complete config. If the '-e'
8717 * option is specified, then we print out error rate information as well.
8718 */
8719 static int
8720 status_callback(zpool_handle_t *zhp, void *data)
8721 {
8722 status_cbdata_t *cbp = data;
8723 nvlist_t *config, *nvroot;
8724 const char *msgid;
8725 zpool_status_t reason;
8726 zpool_errata_t errata;
8727 const char *health;
8728 uint_t c;
8729 vdev_stat_t *vs;
8730
8731 config = zpool_get_config(zhp, NULL);
8732 reason = zpool_get_status(zhp, &msgid, &errata);
8733
8734 cbp->cb_count++;
8735
8736 /*
8737 * If we were given 'zpool status -x', only report those pools with
8738 * problems.
8739 */
8740 if (cbp->cb_explain &&
8741 (reason == ZPOOL_STATUS_OK ||
8742 reason == ZPOOL_STATUS_VERSION_OLDER ||
8743 reason == ZPOOL_STATUS_FEAT_DISABLED ||
8744 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8745 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8746 if (!cbp->cb_allpools) {
8747 (void) printf(gettext("pool '%s' is healthy\n"),
8748 zpool_get_name(zhp));
8749 if (cbp->cb_first)
8750 cbp->cb_first = B_FALSE;
8751 }
8752 return (0);
8753 }
8754
8755 if (cbp->cb_first)
8756 cbp->cb_first = B_FALSE;
8757 else
8758 (void) printf("\n");
8759
8760 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8761 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8762 (uint64_t **)&vs, &c) == 0);
8763
8764 health = zpool_get_state_str(zhp);
8765
8766 printf(" ");
8767 printf_color(ANSI_BOLD, gettext("pool:"));
8768 printf(" %s\n", zpool_get_name(zhp));
8769 fputc(' ', stdout);
8770 printf_color(ANSI_BOLD, gettext("state: "));
8771
8772 printf_color(health_str_to_color(health), "%s", health);
8773
8774 fputc('\n', stdout);
8775
8776 switch (reason) {
8777 case ZPOOL_STATUS_MISSING_DEV_R:
8778 printf_color(ANSI_BOLD, gettext("status: "));
8779 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8780 "not be opened. Sufficient replicas exist for\n\tthe pool "
8781 "to continue functioning in a degraded state.\n"));
8782 printf_color(ANSI_BOLD, gettext("action: "));
8783 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8784 "and online it using 'zpool online'.\n"));
8785 break;
8786
8787 case ZPOOL_STATUS_MISSING_DEV_NR:
8788 printf_color(ANSI_BOLD, gettext("status: "));
8789 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8790 "not be opened. There are insufficient\n\treplicas for the"
8791 " pool to continue functioning.\n"));
8792 printf_color(ANSI_BOLD, gettext("action: "));
8793 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8794 "and online it using 'zpool online'.\n"));
8795 break;
8796
8797 case ZPOOL_STATUS_CORRUPT_LABEL_R:
8798 printf_color(ANSI_BOLD, gettext("status: "));
8799 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8800 "not be used because the label is missing or\n\tinvalid. "
8801 "Sufficient replicas exist for the pool to continue\n\t"
8802 "functioning in a degraded state.\n"));
8803 printf_color(ANSI_BOLD, gettext("action: "));
8804 printf_color(ANSI_YELLOW, gettext("Replace the device using "
8805 "'zpool replace'.\n"));
8806 break;
8807
8808 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8809 printf_color(ANSI_BOLD, gettext("status: "));
8810 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8811 "not be used because the label is missing \n\tor invalid. "
8812 "There are insufficient replicas for the pool to "
8813 "continue\n\tfunctioning.\n"));
8814 zpool_explain_recover(zpool_get_handle(zhp),
8815 zpool_get_name(zhp), reason, config);
8816 break;
8817
8818 case ZPOOL_STATUS_FAILING_DEV:
8819 printf_color(ANSI_BOLD, gettext("status: "));
8820 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8821 "experienced an unrecoverable error. An\n\tattempt was "
8822 "made to correct the error. Applications are "
8823 "unaffected.\n"));
8824 printf_color(ANSI_BOLD, gettext("action: "));
8825 printf_color(ANSI_YELLOW, gettext("Determine if the "
8826 "device needs to be replaced, and clear the errors\n\tusing"
8827 " 'zpool clear' or replace the device with 'zpool "
8828 "replace'.\n"));
8829 break;
8830
8831 case ZPOOL_STATUS_OFFLINE_DEV:
8832 printf_color(ANSI_BOLD, gettext("status: "));
8833 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8834 "been taken offline by the administrator.\n\tSufficient "
8835 "replicas exist for the pool to continue functioning in "
8836 "a\n\tdegraded state.\n"));
8837 printf_color(ANSI_BOLD, gettext("action: "));
8838 printf_color(ANSI_YELLOW, gettext("Online the device "
8839 "using 'zpool online' or replace the device with\n\t'zpool "
8840 "replace'.\n"));
8841 break;
8842
8843 case ZPOOL_STATUS_REMOVED_DEV:
8844 printf_color(ANSI_BOLD, gettext("status: "));
8845 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8846 "been removed by the administrator.\n\tSufficient "
8847 "replicas exist for the pool to continue functioning in "
8848 "a\n\tdegraded state.\n"));
8849 printf_color(ANSI_BOLD, gettext("action: "));
8850 printf_color(ANSI_YELLOW, gettext("Online the device "
8851 "using zpool online' or replace the device with\n\t'zpool "
8852 "replace'.\n"));
8853 break;
8854
8855 case ZPOOL_STATUS_RESILVERING:
8856 case ZPOOL_STATUS_REBUILDING:
8857 printf_color(ANSI_BOLD, gettext("status: "));
8858 printf_color(ANSI_YELLOW, gettext("One or more devices is "
8859 "currently being resilvered. The pool will\n\tcontinue "
8860 "to function, possibly in a degraded state.\n"));
8861 printf_color(ANSI_BOLD, gettext("action: "));
8862 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8863 "complete.\n"));
8864 break;
8865
8866 case ZPOOL_STATUS_REBUILD_SCRUB:
8867 printf_color(ANSI_BOLD, gettext("status: "));
8868 printf_color(ANSI_YELLOW, gettext("One or more devices have "
8869 "been sequentially resilvered, scrubbing\n\tthe pool "
8870 "is recommended.\n"));
8871 printf_color(ANSI_BOLD, gettext("action: "));
8872 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8873 "verify all data checksums.\n"));
8874 break;
8875
8876 case ZPOOL_STATUS_CORRUPT_DATA:
8877 printf_color(ANSI_BOLD, gettext("status: "));
8878 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8879 "experienced an error resulting in data\n\tcorruption. "
8880 "Applications may be affected.\n"));
8881 printf_color(ANSI_BOLD, gettext("action: "));
8882 printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8883 " if possible. Otherwise restore the\n\tentire pool from "
8884 "backup.\n"));
8885 break;
8886
8887 case ZPOOL_STATUS_CORRUPT_POOL:
8888 printf_color(ANSI_BOLD, gettext("status: "));
8889 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8890 "corrupted and the pool cannot be opened.\n"));
8891 zpool_explain_recover(zpool_get_handle(zhp),
8892 zpool_get_name(zhp), reason, config);
8893 break;
8894
8895 case ZPOOL_STATUS_VERSION_OLDER:
8896 printf_color(ANSI_BOLD, gettext("status: "));
8897 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8898 "a legacy on-disk format. The pool can\n\tstill be used, "
8899 "but some features are unavailable.\n"));
8900 printf_color(ANSI_BOLD, gettext("action: "));
8901 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8902 "'zpool upgrade'. Once this is done, the\n\tpool will no "
8903 "longer be accessible on software that does not support\n\t"
8904 "feature flags.\n"));
8905 break;
8906
8907 case ZPOOL_STATUS_VERSION_NEWER:
8908 printf_color(ANSI_BOLD, gettext("status: "));
8909 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8910 "to a newer, incompatible on-disk version.\n\tThe pool "
8911 "cannot be accessed on this system.\n"));
8912 printf_color(ANSI_BOLD, gettext("action: "));
8913 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8914 "system running more recent software, or\n\trestore the "
8915 "pool from backup.\n"));
8916 break;
8917
8918 case ZPOOL_STATUS_FEAT_DISABLED:
8919 printf_color(ANSI_BOLD, gettext("status: "));
8920 printf_color(ANSI_YELLOW, gettext("Some supported and "
8921 "requested features are not enabled on the pool.\n\t"
8922 "The pool can still be used, but some features are "
8923 "unavailable.\n"));
8924 printf_color(ANSI_BOLD, gettext("action: "));
8925 printf_color(ANSI_YELLOW, gettext("Enable all features using "
8926 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8927 "longer be accessible by software that does not support\n\t"
8928 "the features. See zpool-features(7) for details.\n"));
8929 break;
8930
8931 case ZPOOL_STATUS_COMPATIBILITY_ERR:
8932 printf_color(ANSI_BOLD, gettext("status: "));
8933 printf_color(ANSI_YELLOW, gettext("This pool has a "
8934 "compatibility list specified, but it could not be\n\t"
8935 "read/parsed at this time. The pool can still be used, "
8936 "but this\n\tshould be investigated.\n"));
8937 printf_color(ANSI_BOLD, gettext("action: "));
8938 printf_color(ANSI_YELLOW, gettext("Check the value of the "
8939 "'compatibility' property against the\n\t"
8940 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8941 ZPOOL_DATA_COMPAT_D ".\n"));
8942 break;
8943
8944 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8945 printf_color(ANSI_BOLD, gettext("status: "));
8946 printf_color(ANSI_YELLOW, gettext("One or more features "
8947 "are enabled on the pool despite not being\n\t"
8948 "requested by the 'compatibility' property.\n"));
8949 printf_color(ANSI_BOLD, gettext("action: "));
8950 printf_color(ANSI_YELLOW, gettext("Consider setting "
8951 "'compatibility' to an appropriate value, or\n\t"
8952 "adding needed features to the relevant file in\n\t"
8953 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8954 break;
8955
8956 case ZPOOL_STATUS_UNSUP_FEAT_READ:
8957 printf_color(ANSI_BOLD, gettext("status: "));
8958 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8959 "on this system because it uses the\n\tfollowing feature(s)"
8960 " not supported on this system:\n"));
8961 zpool_print_unsup_feat(config);
8962 (void) printf("\n");
8963 printf_color(ANSI_BOLD, gettext("action: "));
8964 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8965 "system that supports the required feature(s),\n\tor "
8966 "restore the pool from backup.\n"));
8967 break;
8968
8969 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8970 printf_color(ANSI_BOLD, gettext("status: "));
8971 printf_color(ANSI_YELLOW, gettext("The pool can only be "
8972 "accessed in read-only mode on this system. It\n\tcannot be"
8973 " accessed in read-write mode because it uses the "
8974 "following\n\tfeature(s) not supported on this system:\n"));
8975 zpool_print_unsup_feat(config);
8976 (void) printf("\n");
8977 printf_color(ANSI_BOLD, gettext("action: "));
8978 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8979 "in read-write mode. Import the pool with\n"
8980 "\t\"-o readonly=on\", access the pool from a system that "
8981 "supports the\n\trequired feature(s), or restore the "
8982 "pool from backup.\n"));
8983 break;
8984
8985 case ZPOOL_STATUS_FAULTED_DEV_R:
8986 printf_color(ANSI_BOLD, gettext("status: "));
8987 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8988 "faulted in response to persistent errors.\n\tSufficient "
8989 "replicas exist for the pool to continue functioning "
8990 "in a\n\tdegraded state.\n"));
8991 printf_color(ANSI_BOLD, gettext("action: "));
8992 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8993 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8994 break;
8995
8996 case ZPOOL_STATUS_FAULTED_DEV_NR:
8997 printf_color(ANSI_BOLD, gettext("status: "));
8998 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8999 "faulted in response to persistent errors. There are "
9000 "insufficient replicas for the pool to\n\tcontinue "
9001 "functioning.\n"));
9002 printf_color(ANSI_BOLD, gettext("action: "));
9003 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
9004 "pool from a backup source. Manually marking the device\n"
9005 "\trepaired using 'zpool clear' may allow some data "
9006 "to be recovered.\n"));
9007 break;
9008
9009 case ZPOOL_STATUS_IO_FAILURE_MMP:
9010 printf_color(ANSI_BOLD, gettext("status: "));
9011 printf_color(ANSI_YELLOW, gettext("The pool is suspended "
9012 "because multihost writes failed or were delayed;\n\t"
9013 "another system could import the pool undetected.\n"));
9014 printf_color(ANSI_BOLD, gettext("action: "));
9015 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
9016 " are connected, then reboot your system and\n\timport the "
9017 "pool.\n"));
9018 break;
9019
9020 case ZPOOL_STATUS_IO_FAILURE_WAIT:
9021 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
9022 printf_color(ANSI_BOLD, gettext("status: "));
9023 printf_color(ANSI_YELLOW, gettext("One or more devices are "
9024 "faulted in response to IO failures.\n"));
9025 printf_color(ANSI_BOLD, gettext("action: "));
9026 printf_color(ANSI_YELLOW, gettext("Make sure the affected "
9027 "devices are connected, then run 'zpool clear'.\n"));
9028 break;
9029
9030 case ZPOOL_STATUS_BAD_LOG:
9031 printf_color(ANSI_BOLD, gettext("status: "));
9032 printf_color(ANSI_YELLOW, gettext("An intent log record "
9033 "could not be read.\n"
9034 "\tWaiting for administrator intervention to fix the "
9035 "faulted pool.\n"));
9036 printf_color(ANSI_BOLD, gettext("action: "));
9037 printf_color(ANSI_YELLOW, gettext("Either restore the affected "
9038 "device(s) and run 'zpool online',\n"
9039 "\tor ignore the intent log records by running "
9040 "'zpool clear'.\n"));
9041 break;
9042
9043 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
9044 (void) printf(gettext("status: One or more devices are "
9045 "configured to use a non-native block size.\n"
9046 "\tExpect reduced performance.\n"));
9047 (void) printf(gettext("action: Replace affected devices with "
9048 "devices that support the\n\tconfigured block size, or "
9049 "migrate data to a properly configured\n\tpool.\n"));
9050 break;
9051
9052 case ZPOOL_STATUS_HOSTID_MISMATCH:
9053 printf_color(ANSI_BOLD, gettext("status: "));
9054 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
9055 " and system hostid on imported pool.\n\tThis pool was "
9056 "previously imported into a system with a different "
9057 "hostid,\n\tand then was verbatim imported into this "
9058 "system.\n"));
9059 printf_color(ANSI_BOLD, gettext("action: "));
9060 printf_color(ANSI_YELLOW, gettext("Export this pool on all "
9061 "systems on which it is imported.\n"
9062 "\tThen import it to correct the mismatch.\n"));
9063 break;
9064
9065 case ZPOOL_STATUS_ERRATA:
9066 printf_color(ANSI_BOLD, gettext("status: "));
9067 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
9068 errata);
9069
9070 switch (errata) {
9071 case ZPOOL_ERRATA_NONE:
9072 break;
9073
9074 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
9075 printf_color(ANSI_BOLD, gettext("action: "));
9076 printf_color(ANSI_YELLOW, gettext("To correct the issue"
9077 " run 'zpool scrub'.\n"));
9078 break;
9079
9080 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
9081 (void) printf(gettext("\tExisting encrypted datasets "
9082 "contain an on-disk incompatibility\n\twhich "
9083 "needs to be corrected.\n"));
9084 printf_color(ANSI_BOLD, gettext("action: "));
9085 printf_color(ANSI_YELLOW, gettext("To correct the issue"
9086 " backup existing encrypted datasets to new\n\t"
9087 "encrypted datasets and destroy the old ones. "
9088 "'zfs mount -o ro' can\n\tbe used to temporarily "
9089 "mount existing encrypted datasets readonly.\n"));
9090 break;
9091
9092 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
9093 (void) printf(gettext("\tExisting encrypted snapshots "
9094 "and bookmarks contain an on-disk\n\tincompat"
9095 "ibility. This may cause on-disk corruption if "
9096 "they are used\n\twith 'zfs recv'.\n"));
9097 printf_color(ANSI_BOLD, gettext("action: "));
9098 printf_color(ANSI_YELLOW, gettext("To correct the "
9099 "issue, enable the bookmark_v2 feature. No "
9100 "additional\n\taction is needed if there are no "
9101 "encrypted snapshots or bookmarks.\n\tIf preserving"
9102 "the encrypted snapshots and bookmarks is required,"
9103 " use\n\ta non-raw send to backup and restore them."
9104 " Alternately, they may be\n\tremoved to resolve "
9105 "the incompatibility.\n"));
9106 break;
9107
9108 default:
9109 /*
9110 * All errata which allow the pool to be imported
9111 * must contain an action message.
9112 */
9113 assert(0);
9114 }
9115 break;
9116
9117 default:
9118 /*
9119 * The remaining errors can't actually be generated, yet.
9120 */
9121 assert(reason == ZPOOL_STATUS_OK);
9122 }
9123
9124 if (msgid != NULL) {
9125 printf(" ");
9126 printf_color(ANSI_BOLD, gettext("see:"));
9127 printf(gettext(
9128 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
9129 msgid);
9130 }
9131
9132 if (config != NULL) {
9133 uint64_t nerr;
9134 nvlist_t **spares, **l2cache;
9135 uint_t nspares, nl2cache;
9136
9137 print_scan_status(zhp, nvroot);
9138
9139 pool_removal_stat_t *prs = NULL;
9140 (void) nvlist_lookup_uint64_array(nvroot,
9141 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
9142 print_removal_status(zhp, prs);
9143
9144 pool_checkpoint_stat_t *pcs = NULL;
9145 (void) nvlist_lookup_uint64_array(nvroot,
9146 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
9147 print_checkpoint_status(pcs);
9148
9149 pool_raidz_expand_stat_t *pres = NULL;
9150 (void) nvlist_lookup_uint64_array(nvroot,
9151 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
9152 print_raidz_expand_status(zhp, pres);
9153
9154 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
9155 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
9156 if (cbp->cb_namewidth < 10)
9157 cbp->cb_namewidth = 10;
9158
9159 color_start(ANSI_BOLD);
9160 (void) printf(gettext("config:\n\n"));
9161 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
9162 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
9163 "CKSUM");
9164 color_end();
9165
9166 if (cbp->cb_print_slow_ios) {
9167 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
9168 }
9169
9170 if (cbp->cb_print_power) {
9171 printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
9172 }
9173
9174 if (cbp->vcdl != NULL)
9175 print_cmd_columns(cbp->vcdl, 0);
9176
9177 printf("\n");
9178
9179 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
9180 B_FALSE, NULL);
9181
9182 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
9183 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
9184 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
9185
9186 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
9187 &l2cache, &nl2cache) == 0)
9188 print_l2cache(zhp, cbp, l2cache, nl2cache);
9189
9190 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
9191 &spares, &nspares) == 0)
9192 print_spares(zhp, cbp, spares, nspares);
9193
9194 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
9195 &nerr) == 0) {
9196 (void) printf("\n");
9197 if (nerr == 0) {
9198 (void) printf(gettext(
9199 "errors: No known data errors\n"));
9200 } else if (!cbp->cb_verbose) {
9201 color_start(ANSI_RED);
9202 (void) printf(gettext("errors: %llu data "
9203 "errors, use '-v' for a list\n"),
9204 (u_longlong_t)nerr);
9205 color_end();
9206 } else {
9207 print_error_log(zhp);
9208 }
9209 }
9210
9211 if (cbp->cb_dedup_stats)
9212 print_dedup_stats(config);
9213 } else {
9214 (void) printf(gettext("config: The configuration cannot be "
9215 "determined.\n"));
9216 }
9217
9218 return (0);
9219 }
9220
9221 /*
9222 * zpool status [-c [script1,script2,...]] [-DegiLpPstvx] [--power] [-T d|u] ...
9223 * [pool] [interval [count]]
9224 *
9225 * -c CMD For each vdev, run command CMD
9226 * -D Display dedup status (undocumented)
9227 * -e Display only unhealthy vdevs
9228 * -g Display guid for individual vdev name.
9229 * -i Display vdev initialization status.
9230 * -L Follow links when resolving vdev path name.
9231 * -p Display values in parsable (exact) format.
9232 * -P Display full path for vdev name.
9233 * -s Display slow IOs column.
9234 * -t Display vdev TRIM status.
9235 * -T Display a timestamp in date(1) or Unix format
9236 * -v Display complete error logs
9237 * -x Display only pools with potential problems
9238 * --power Display vdev enclosure slot power status
9239 *
9240 * Describes the health status of all pools or some subset.
9241 */
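/*
 * For example, "zpool status -x" reports only pools with potential problems,
 * and "zpool status -v tank 5" redisplays the verbose status of 'tank' every
 * five seconds.
 */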
9242 int
9243 zpool_do_status(int argc, char **argv)
9244 {
9245 int c;
9246 int ret;
9247 float interval = 0;
9248 unsigned long count = 0;
9249 status_cbdata_t cb = { 0 };
9250 char *cmd = NULL;
9251
9252 struct option long_options[] = {
9253 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
9254 {0, 0, 0, 0}
9255 };
9256
9257 /* check options */
9258 while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options,
9259 NULL)) != -1) {
9260 switch (c) {
9261 case 'c':
9262 if (cmd != NULL) {
9263 fprintf(stderr,
9264 gettext("Can't set -c flag twice\n"));
9265 exit(1);
9266 }
9267
9268 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
9269 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
9270 fprintf(stderr, gettext(
9271 "Can't run -c, disabled by "
9272 "ZPOOL_SCRIPTS_ENABLED.\n"));
9273 exit(1);
9274 }
9275
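/*
 * Refuse to run per-vdev scripts with root privileges (uid 0) unless
 * ZPOOL_SCRIPTS_AS_ROOT is explicitly set.
 */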
9276 if ((getuid() <= 0 || geteuid() <= 0) &&
9277 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
9278 fprintf(stderr, gettext(
9279 "Can't run -c with root privileges "
9280 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
9281 exit(1);
9282 }
9283 cmd = optarg;
9284 break;
9285 case 'D':
9286 cb.cb_dedup_stats = B_TRUE;
9287 break;
9288 case 'e':
9289 cb.cb_print_unhealthy = B_TRUE;
9290 break;
9291 case 'g':
9292 cb.cb_name_flags |= VDEV_NAME_GUID;
9293 break;
9294 case 'i':
9295 cb.cb_print_vdev_init = B_TRUE;
9296 break;
9297 case 'L':
9298 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
9299 break;
9300 case 'p':
9301 cb.cb_literal = B_TRUE;
9302 break;
9303 case 'P':
9304 cb.cb_name_flags |= VDEV_NAME_PATH;
9305 break;
9306 case 's':
9307 cb.cb_print_slow_ios = B_TRUE;
9308 break;
9309 case 't':
9310 cb.cb_print_vdev_trim = B_TRUE;
9311 break;
9312 case 'T':
9313 get_timestamp_arg(*optarg);
9314 break;
9315 case 'v':
9316 cb.cb_verbose = B_TRUE;
9317 break;
9318 case 'x':
9319 cb.cb_explain = B_TRUE;
9320 break;
9321 case ZPOOL_OPTION_POWER:
9322 cb.cb_print_power = B_TRUE;
9323 break;
9324 case '?':
9325 if (optopt == 'c') {
9326 print_zpool_script_list("status");
9327 exit(0);
9328 } else {
9329 fprintf(stderr,
9330 gettext("invalid option '%c'\n"), optopt);
9331 }
9332 usage(B_FALSE);
9333 }
9334 }
9335
9336 argc -= optind;
9337 argv += optind;
9338
9339 get_interval_count(&argc, argv, &interval, &count);
9340
9341 if (argc == 0)
9342 cb.cb_allpools = B_TRUE;
9343
9344 cb.cb_first = B_TRUE;
9345 cb.cb_print_status = B_TRUE;
9346
9347 for (;;) {
9348 if (timestamp_fmt != NODATE)
9349 print_timestamp(timestamp_fmt);
9350
9351 if (cmd != NULL)
9352 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
9353 NULL, NULL, 0, 0);
9354
9355 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
9356 cb.cb_literal, status_callback, &cb);
9357
9358 if (cb.vcdl != NULL)
9359 free_vdev_cmd_data_list(cb.vcdl);
9360 if (argc == 0 && cb.cb_count == 0)
9361 (void) fprintf(stderr, gettext("no pools available\n"));
9362 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
9363 (void) printf(gettext("all pools are healthy\n"));
9364
9365 if (ret != 0)
9366 return (ret);
9367
9368 if (interval == 0)
9369 break;
9370
9371 if (count != 0 && --count == 0)
9372 break;
9373
9374 (void) fflush(stdout);
9375 (void) fsleep(interval);
9376 }
9377
9378 return (0);
9379 }
9380
9381 typedef struct upgrade_cbdata {
9382 int cb_first;
9383 int cb_argc;
9384 uint64_t cb_version;
9385 char **cb_argv;
9386 } upgrade_cbdata_t;
9387
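/*
 * zfs_iter_root()/zfs_iter_filesystems_v2() callback: count (via the
 * unsupp_fs argument) filesystems whose ZPL version is newer than this
 * implementation supports, recursing into child filesystems.
 */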
9388 static int
9389 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
9390 {
9391 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
9392 int *count = (int *)unsupp_fs;
9393
9394 if (zfs_version > ZPL_VERSION) {
9395 (void) printf(gettext("%s (v%d) is not supported by this "
9396 "implementation of ZFS.\n"),
9397 zfs_get_name(zhp), zfs_version);
9398 (*count)++;
9399 }
9400
9401 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
9402
9403 zfs_close(zhp);
9404
9405 return (0);
9406 }
9407
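/*
 * Upgrade a pool to the requested on-disk version, refusing to do so if any
 * filesystem uses an unsupported ZPL version or if the pool's
 * 'compatibility' property is set to 'legacy'.
 */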
9408 static int
9409 upgrade_version(zpool_handle_t *zhp, uint64_t version)
9410 {
9411 int ret;
9412 nvlist_t *config;
9413 uint64_t oldversion;
9414 int unsupp_fs = 0;
9415
9416 config = zpool_get_config(zhp, NULL);
9417 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9418 &oldversion) == 0);
9419
9420 char compat[ZFS_MAXPROPLEN];
9421 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9422 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9423 compat[0] = '\0';
9424
9425 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
9426 assert(oldversion < version);
9427
9428 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
9429 if (ret != 0)
9430 return (ret);
9431
9432 if (unsupp_fs) {
9433 (void) fprintf(stderr, gettext("Upgrade not performed due "
9434 "to %d unsupported filesystems (max v%d).\n"),
9435 unsupp_fs, (int)ZPL_VERSION);
9436 return (1);
9437 }
9438
9439 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
9440 (void) fprintf(stderr, gettext("Upgrade not performed because "
9441 "'compatibility' property set to '"
9442 ZPOOL_COMPAT_LEGACY "'.\n"));
9443 return (1);
9444 }
9445
9446 ret = zpool_upgrade(zhp, version);
9447 if (ret != 0)
9448 return (ret);
9449
9450 if (version >= SPA_VERSION_FEATURES) {
9451 (void) printf(gettext("Successfully upgraded "
9452 "'%s' from version %llu to feature flags.\n"),
9453 zpool_get_name(zhp), (u_longlong_t)oldversion);
9454 } else {
9455 (void) printf(gettext("Successfully upgraded "
9456 "'%s' from version %llu to version %llu.\n"),
9457 zpool_get_name(zhp), (u_longlong_t)oldversion,
9458 (u_longlong_t)version);
9459 }
9460
9461 return (0);
9462 }
9463
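/*
 * Enable every supported feature allowed by the pool's 'compatibility'
 * property that is not already enabled, returning the number of newly
 * enabled features via *countp.
 */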
9464 static int
9465 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
9466 {
9467 int i, ret, count;
9468 boolean_t firstff = B_TRUE;
9469 nvlist_t *enabled = zpool_get_features(zhp);
9470
9471 char compat[ZFS_MAXPROPLEN];
9472 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
9473 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
9474 compat[0] = '\0';
9475
9476 boolean_t requested_features[SPA_FEATURES];
9477 if (zpool_do_load_compat(compat, requested_features) !=
9478 ZPOOL_COMPATIBILITY_OK)
9479 return (-1);
9480
9481 count = 0;
9482 for (i = 0; i < SPA_FEATURES; i++) {
9483 const char *fname = spa_feature_table[i].fi_uname;
9484 const char *fguid = spa_feature_table[i].fi_guid;
9485
9486 if (!spa_feature_table[i].fi_zfs_mod_supported)
9487 continue;
9488
9489 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
9490 char *propname;
9491 verify(-1 != asprintf(&propname, "feature@%s", fname));
9492 ret = zpool_set_prop(zhp, propname,
9493 ZFS_FEATURE_ENABLED);
9494 if (ret != 0) {
9495 free(propname);
9496 return (ret);
9497 }
9498 count++;
9499
9500 if (firstff) {
9501 (void) printf(gettext("Enabled the "
9502 "following features on '%s':\n"),
9503 zpool_get_name(zhp));
9504 firstff = B_FALSE;
9505 }
9506 (void) printf(gettext(" %s\n"), fname);
9507 free(propname);
9508 }
9509 }
9510
9511 if (countp != NULL)
9512 *countp = count;
9513 return (0);
9514 }
9515
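/*
 * Per-pool callback for 'zpool upgrade -a': raise the on-disk version to
 * cb_version if needed and, for feature-flag pools, enable any supported
 * features that are still disabled.
 */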
9516 static int
9517 upgrade_cb(zpool_handle_t *zhp, void *arg)
9518 {
9519 upgrade_cbdata_t *cbp = arg;
9520 nvlist_t *config;
9521 uint64_t version;
9522 boolean_t modified_pool = B_FALSE;
9523 int ret;
9524
9525 config = zpool_get_config(zhp, NULL);
9526 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9527 &version) == 0);
9528
9529 assert(SPA_VERSION_IS_SUPPORTED(version));
9530
9531 if (version < cbp->cb_version) {
9532 cbp->cb_first = B_FALSE;
9533 ret = upgrade_version(zhp, cbp->cb_version);
9534 if (ret != 0)
9535 return (ret);
9536 modified_pool = B_TRUE;
9537
9538 /*
9539 * If they did "zpool upgrade -a", then we could
9540 * be doing ioctls to different pools. We need
9541 * to log this history once to each pool, and bypass
9542 * the normal history logging that happens in main().
9543 */
9544 (void) zpool_log_history(g_zfs, history_str);
9545 log_history = B_FALSE;
9546 }
9547
9548 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9549 int count;
9550 ret = upgrade_enable_all(zhp, &count);
9551 if (ret != 0)
9552 return (ret);
9553
9554 if (count > 0) {
9555 cbp->cb_first = B_FALSE;
9556 modified_pool = B_TRUE;
9557 }
9558 }
9559
9560 if (modified_pool) {
9561 (void) printf("\n");
9562 (void) after_zpool_upgrade(zhp);
9563 }
9564
9565 return (0);
9566 }
9567
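/*
 * List pools that are still formatted with a legacy (pre-feature-flag)
 * version number and could be upgraded.
 */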
9568 static int
9569 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
9570 {
9571 upgrade_cbdata_t *cbp = arg;
9572 nvlist_t *config;
9573 uint64_t version;
9574
9575 config = zpool_get_config(zhp, NULL);
9576 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9577 &version) == 0);
9578
9579 assert(SPA_VERSION_IS_SUPPORTED(version));
9580
9581 if (version < SPA_VERSION_FEATURES) {
9582 if (cbp->cb_first) {
9583 (void) printf(gettext("The following pools are "
9584 "formatted with legacy version numbers and can\n"
9585 "be upgraded to use feature flags. After "
9586 "being upgraded, these pools\nwill no "
9587 "longer be accessible by software that does not "
9588 "support feature\nflags.\n\n"
9589 "Note that setting a pool's 'compatibility' "
9590 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
9591 "inhibit upgrades.\n\n"));
9592 (void) printf(gettext("VER POOL\n"));
9593 (void) printf(gettext("--- ------------\n"));
9594 cbp->cb_first = B_FALSE;
9595 }
9596
9597 (void) printf("%2llu %s\n", (u_longlong_t)version,
9598 zpool_get_name(zhp));
9599 }
9600
9601 return (0);
9602 }
9603
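/*
 * List feature-flag pools that have one or more supported features
 * not yet enabled.
 */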
9604 static int
9605 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
9606 {
9607 upgrade_cbdata_t *cbp = arg;
9608 nvlist_t *config;
9609 uint64_t version;
9610
9611 config = zpool_get_config(zhp, NULL);
9612 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
9613 &version) == 0);
9614
9615 if (version >= SPA_VERSION_FEATURES) {
9616 int i;
9617 boolean_t poolfirst = B_TRUE;
9618 nvlist_t *enabled = zpool_get_features(zhp);
9619
9620 for (i = 0; i < SPA_FEATURES; i++) {
9621 const char *fguid = spa_feature_table[i].fi_guid;
9622 const char *fname = spa_feature_table[i].fi_uname;
9623
9624 if (!spa_feature_table[i].fi_zfs_mod_supported)
9625 continue;
9626
9627 if (!nvlist_exists(enabled, fguid)) {
9628 if (cbp->cb_first) {
9629 (void) printf(gettext("\nSome "
9630 "supported features are not "
9631 "enabled on the following pools. "
9632 "Once a\nfeature is enabled the "
9633 "pool may become incompatible with "
9634 "software\nthat does not support "
9635 "the feature. See "
9636 "zpool-features(7) for "
9637 "details.\n\n"
9638 "Note that the pool "
9639 "'compatibility' feature can be "
9640 "used to inhibit\nfeature "
9641 "upgrades.\n\n"));
9642 (void) printf(gettext("POOL "
9643 "FEATURE\n"));
9644 (void) printf(gettext("------"
9645 "---------\n"));
9646 cbp->cb_first = B_FALSE;
9647 }
9648
9649 if (poolfirst) {
9650 (void) printf(gettext("%s\n"),
9651 zpool_get_name(zhp));
9652 poolfirst = B_FALSE;
9653 }
9654
9655 (void) printf(gettext(" %s\n"), fname);
9656 }
9657 /*
9658 * If they did "zpool upgrade -a", then we could
9659 * be doing ioctls to different pools. We need
9660 * to log this history once to each pool, and bypass
9661 * the normal history logging that happens in main().
9662 */
9663 (void) zpool_log_history(g_zfs, history_str);
9664 log_history = B_FALSE;
9665 }
9666 }
9667
9668 return (0);
9669 }
9670
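/*
 * Upgrade a single named pool, special-casing the reserved pool name "log"
 * and reporting when the pool is already at (or past) the requested version.
 */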
9671 static int
9672 upgrade_one(zpool_handle_t *zhp, void *data)
9673 {
9674 boolean_t modified_pool = B_FALSE;
9675 upgrade_cbdata_t *cbp = data;
9676 uint64_t cur_version;
9677 int ret;
9678
9679 if (strcmp("log", zpool_get_name(zhp)) == 0) {
9680 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9681 "Pool 'log' must be renamed using export and import"
9682 " to upgrade.\n"));
9683 return (1);
9684 }
9685
9686 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9687 if (cur_version > cbp->cb_version) {
9688 (void) printf(gettext("Pool '%s' is already formatted "
9689 "using more current version '%llu'.\n\n"),
9690 zpool_get_name(zhp), (u_longlong_t)cur_version);
9691 return (0);
9692 }
9693
9694 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9695 (void) printf(gettext("Pool '%s' is already formatted "
9696 "using version %llu.\n\n"), zpool_get_name(zhp),
9697 (u_longlong_t)cbp->cb_version);
9698 return (0);
9699 }
9700
9701 if (cur_version != cbp->cb_version) {
9702 modified_pool = B_TRUE;
9703 ret = upgrade_version(zhp, cbp->cb_version);
9704 if (ret != 0)
9705 return (ret);
9706 }
9707
9708 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9709 int count = 0;
9710 ret = upgrade_enable_all(zhp, &count);
9711 if (ret != 0)
9712 return (ret);
9713
9714 if (count != 0) {
9715 modified_pool = B_TRUE;
9716 } else if (cur_version == SPA_VERSION) {
9717 (void) printf(gettext("Pool '%s' already has all "
9718 "supported and requested features enabled.\n"),
9719 zpool_get_name(zhp));
9720 }
9721 }
9722
9723 if (modified_pool) {
9724 (void) printf("\n");
9725 (void) after_zpool_upgrade(zhp);
9726 }
9727
9728 return (0);
9729 }
9730
9731 /*
9732 * zpool upgrade
9733 * zpool upgrade -v
9734 * zpool upgrade [-V version] <-a | pool ...>
9735 *
9736 * With no arguments, display downrev'd ZFS pools available for upgrade.
9737 * Individual pools can be upgraded by specifying the pool, and '-a' will
9738 * upgrade all pools.
9739 */
9740 int
9741 zpool_do_upgrade(int argc, char **argv)
9742 {
9743 int c;
9744 upgrade_cbdata_t cb = { 0 };
9745 int ret = 0;
9746 boolean_t showversions = B_FALSE;
9747 boolean_t upgradeall = B_FALSE;
9748 char *end;
9749
9750
9751 /* check options */
9752 while ((c = getopt(argc, argv, ":avV:")) != -1) {
9753 switch (c) {
9754 case 'a':
9755 upgradeall = B_TRUE;
9756 break;
9757 case 'v':
9758 showversions = B_TRUE;
9759 break;
9760 case 'V':
9761 cb.cb_version = strtoll(optarg, &end, 10);
9762 if (*end != '\0' ||
9763 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9764 (void) fprintf(stderr,
9765 gettext("invalid version '%s'\n"), optarg);
9766 usage(B_FALSE);
9767 }
9768 break;
9769 case ':':
9770 (void) fprintf(stderr, gettext("missing argument for "
9771 "'%c' option\n"), optopt);
9772 usage(B_FALSE);
9773 break;
9774 case '?':
9775 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9776 optopt);
9777 usage(B_FALSE);
9778 }
9779 }
9780
9781 cb.cb_argc = argc;
9782 cb.cb_argv = argv;
9783 argc -= optind;
9784 argv += optind;
9785
9786 if (cb.cb_version == 0) {
9787 cb.cb_version = SPA_VERSION;
9788 } else if (!upgradeall && argc == 0) {
9789 (void) fprintf(stderr, gettext("-V option is "
9790 "incompatible with other arguments\n"));
9791 usage(B_FALSE);
9792 }
9793
9794 if (showversions) {
9795 if (upgradeall || argc != 0) {
9796 (void) fprintf(stderr, gettext("-v option is "
9797 "incompatible with other arguments\n"));
9798 usage(B_FALSE);
9799 }
9800 } else if (upgradeall) {
9801 if (argc != 0) {
9802 (void) fprintf(stderr, gettext("-a option should not "
9803 "be used along with a pool name\n"));
9804 usage(B_FALSE);
9805 }
9806 }
9807
9808 (void) printf("%s", gettext("This system supports ZFS pool feature "
9809 "flags.\n\n"));
9810 if (showversions) {
9811 int i;
9812
9813 (void) printf(gettext("The following features are "
9814 "supported:\n\n"));
9815 (void) printf(gettext("FEAT DESCRIPTION\n"));
9816 (void) printf("----------------------------------------------"
9817 "---------------\n");
9818 for (i = 0; i < SPA_FEATURES; i++) {
9819 zfeature_info_t *fi = &spa_feature_table[i];
9820 if (!fi->fi_zfs_mod_supported)
9821 continue;
9822 const char *ro =
9823 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9824 " (read-only compatible)" : "";
9825
9826 (void) printf("%-37s%s\n", fi->fi_uname, ro);
9827 (void) printf(" %s\n", fi->fi_desc);
9828 }
9829 (void) printf("\n");
9830
9831 (void) printf(gettext("The following legacy versions are also "
9832 "supported:\n\n"));
9833 (void) printf(gettext("VER DESCRIPTION\n"));
9834 (void) printf("--- -----------------------------------------"
9835 "---------------\n");
9836 (void) printf(gettext(" 1 Initial ZFS version\n"));
9837 (void) printf(gettext(" 2 Ditto blocks "
9838 "(replicated metadata)\n"));
9839 (void) printf(gettext(" 3 Hot spares and double parity "
9840 "RAID-Z\n"));
9841 (void) printf(gettext(" 4 zpool history\n"));
9842 (void) printf(gettext(" 5 Compression using the gzip "
9843 "algorithm\n"));
9844 (void) printf(gettext(" 6 bootfs pool property\n"));
9845 (void) printf(gettext(" 7 Separate intent log devices\n"));
9846 (void) printf(gettext(" 8 Delegated administration\n"));
9847 (void) printf(gettext(" 9 refquota and refreservation "
9848 "properties\n"));
9849 (void) printf(gettext(" 10 Cache devices\n"));
9850 (void) printf(gettext(" 11 Improved scrub performance\n"));
9851 (void) printf(gettext(" 12 Snapshot properties\n"));
9852 (void) printf(gettext(" 13 snapused property\n"));
9853 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
9854 (void) printf(gettext(" 15 user/group space accounting\n"));
9855 (void) printf(gettext(" 16 stmf property support\n"));
9856 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
9857 (void) printf(gettext(" 18 Snapshot user holds\n"));
9858 (void) printf(gettext(" 19 Log device removal\n"));
9859 (void) printf(gettext(" 20 Compression using zle "
9860 "(zero-length encoding)\n"));
9861 (void) printf(gettext(" 21 Deduplication\n"));
9862 (void) printf(gettext(" 22 Received properties\n"));
9863 (void) printf(gettext(" 23 Slim ZIL\n"));
9864 (void) printf(gettext(" 24 System attributes\n"));
9865 (void) printf(gettext(" 25 Improved scrub stats\n"));
9866 (void) printf(gettext(" 26 Improved snapshot deletion "
9867 "performance\n"));
9868 (void) printf(gettext(" 27 Improved snapshot creation "
9869 "performance\n"));
9870 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
9871 (void) printf(gettext("\nFor more information on a particular "
9872 "version, including supported releases,\n"));
9873 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9874 } else if (argc == 0 && upgradeall) {
9875 cb.cb_first = B_TRUE;
9876 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9877 if (ret == 0 && cb.cb_first) {
9878 if (cb.cb_version == SPA_VERSION) {
9879 (void) printf(gettext("All pools are already "
9880 "formatted using feature flags.\n\n"));
9881 (void) printf(gettext("Every feature flags "
9882 "pool already has all supported and "
9883 "requested features enabled.\n"));
9884 } else {
9885 (void) printf(gettext("All pools are already "
9886 "formatted with version %llu or higher.\n"),
9887 (u_longlong_t)cb.cb_version);
9888 }
9889 }
9890 } else if (argc == 0) {
9891 cb.cb_first = B_TRUE;
9892 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9893 assert(ret == 0);
9894
9895 if (cb.cb_first) {
9896 (void) printf(gettext("All pools are formatted "
9897 "using feature flags.\n\n"));
9898 } else {
9899 (void) printf(gettext("\nUse 'zpool upgrade -v' "
9900 "for a list of available legacy versions.\n"));
9901 }
9902
9903 cb.cb_first = B_TRUE;
9904 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9905 assert(ret == 0);
9906
9907 if (cb.cb_first) {
9908 (void) printf(gettext("Every feature flags pool has "
9909 "all supported and requested features enabled.\n"));
9910 } else {
9911 (void) printf(gettext("\n"));
9912 }
9913 } else {
9914 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9915 B_FALSE, upgrade_one, &cb);
9916 }
9917
9918 return (ret);
9919 }
9920
9921 typedef struct hist_cbdata {
9922 boolean_t first;
9923 boolean_t longfmt;
9924 boolean_t internal;
9925 } hist_cbdata_t;
9926
9927 static void
9928 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9929 {
9930 nvlist_t **records;
9931 uint_t numrecords;
9932 int i;
9933
9934 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9935 &records, &numrecords) == 0);
9936 for (i = 0; i < numrecords; i++) {
9937 nvlist_t *rec = records[i];
9938 char tbuf[64] = "";
9939
9940 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9941 time_t tsec;
9942 struct tm t;
9943
9944 tsec = fnvlist_lookup_uint64(records[i],
9945 ZPOOL_HIST_TIME);
9946 (void) localtime_r(&tsec, &t);
9947 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9948 }
9949
9950 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9951 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9952 ZPOOL_HIST_ELAPSED_NS);
9953 (void) snprintf(tbuf + strlen(tbuf),
9954 sizeof (tbuf) - strlen(tbuf),
9955 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9956 }
9957
9958 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9959 (void) printf("%s %s", tbuf,
9960 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9961 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9962 int ievent =
9963 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9964 if (!cb->internal)
9965 continue;
9966 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9967 (void) printf("%s unrecognized record:\n",
9968 tbuf);
9969 dump_nvlist(rec, 4);
9970 continue;
9971 }
9972 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
9973 zfs_history_event_names[ievent],
9974 (longlong_t)fnvlist_lookup_uint64(
9975 rec, ZPOOL_HIST_TXG),
9976 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9977 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9978 if (!cb->internal)
9979 continue;
9980 (void) printf("%s [txg:%lld] %s", tbuf,
9981 (longlong_t)fnvlist_lookup_uint64(
9982 rec, ZPOOL_HIST_TXG),
9983 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9984 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9985 (void) printf(" %s (%llu)",
9986 fnvlist_lookup_string(rec,
9987 ZPOOL_HIST_DSNAME),
9988 (u_longlong_t)fnvlist_lookup_uint64(rec,
9989 ZPOOL_HIST_DSID));
9990 }
9991 (void) printf(" %s", fnvlist_lookup_string(rec,
9992 ZPOOL_HIST_INT_STR));
9993 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9994 if (!cb->internal)
9995 continue;
9996 (void) printf("%s ioctl %s\n", tbuf,
9997 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9998 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9999 (void) printf(" input:\n");
10000 dump_nvlist(fnvlist_lookup_nvlist(rec,
10001 ZPOOL_HIST_INPUT_NVL), 8);
10002 }
10003 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
10004 (void) printf(" output:\n");
10005 dump_nvlist(fnvlist_lookup_nvlist(rec,
10006 ZPOOL_HIST_OUTPUT_NVL), 8);
10007 }
10008 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
10009 (void) printf(" output nvlist omitted; "
10010 "original size: %lldKB\n",
10011 (longlong_t)fnvlist_lookup_int64(rec,
10012 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
10013 }
10014 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
10015 (void) printf(" errno: %lld\n",
10016 (longlong_t)fnvlist_lookup_int64(rec,
10017 ZPOOL_HIST_ERRNO));
10018 }
10019 } else {
10020 if (!cb->internal)
10021 continue;
10022 (void) printf("%s unrecognized record:\n", tbuf);
10023 dump_nvlist(rec, 4);
10024 }
10025
10026 if (!cb->longfmt) {
10027 (void) printf("\n");
10028 continue;
10029 }
10030 (void) printf(" [");
10031 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
10032 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
10033 struct passwd *pwd = getpwuid(who);
10034 (void) printf("user %d ", (int)who);
10035 if (pwd != NULL)
10036 (void) printf("(%s) ", pwd->pw_name);
10037 }
10038 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
10039 (void) printf("on %s",
10040 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
10041 }
10042 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
10043 (void) printf(":%s",
10044 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
10045 }
10046
10047 (void) printf("]");
10048 (void) printf("\n");
10049 }
10050 }
10051
10052 /*
10053 * Print out the command history for a specific pool.
10054 */
10055 static int
10056 get_history_one(zpool_handle_t *zhp, void *data)
10057 {
10058 nvlist_t *nvhis;
10059 int ret;
10060 hist_cbdata_t *cb = (hist_cbdata_t *)data;
10061 uint64_t off = 0;
10062 boolean_t eof = B_FALSE;
10063
10064 cb->first = B_FALSE;
10065
10066 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
10067
10068 while (!eof) {
10069 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
10070 return (ret);
10071
10072 print_history_records(nvhis, cb);
10073 nvlist_free(nvhis);
10074 }
10075 (void) printf("\n");
10076
10077 return (ret);
10078 }
10079
10080 /*
10081 * zpool history <pool>
10082 *
10083 * Displays the history of commands that modified pools.
10084 */
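/*
 * Illustrative invocation and output shape (pool name, command and values
 * are hypothetical); -l appends the [user/host] suffix and -i includes
 * internally logged events, per the option handling below:
 *
 *   # zpool history -l tank
 *   History for 'tank':
 *   2024-01-02.03:04:05 zpool create tank mirror sda sdb [user 0 (root) on myhost]
 */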
10085 int
10086 zpool_do_history(int argc, char **argv)
10087 {
10088 hist_cbdata_t cbdata = { 0 };
10089 int ret;
10090 int c;
10091
10092 cbdata.first = B_TRUE;
10093 /* check options */
10094 while ((c = getopt(argc, argv, "li")) != -1) {
10095 switch (c) {
10096 case 'l':
10097 cbdata.longfmt = B_TRUE;
10098 break;
10099 case 'i':
10100 cbdata.internal = B_TRUE;
10101 break;
10102 case '?':
10103 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10104 optopt);
10105 usage(B_FALSE);
10106 }
10107 }
10108 argc -= optind;
10109 argv += optind;
10110
10111 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
10112 B_FALSE, get_history_one, &cbdata);
10113
10114 if (argc == 0 && cbdata.first == B_TRUE) {
10115 (void) fprintf(stderr, gettext("no pools available\n"));
10116 return (0);
10117 }
10118
10119 return (ret);
10120 }
10121
10122 typedef struct ev_opts {
10123 int verbose;
10124 int scripted;
10125 int follow;
10126 int clear;
10127 char poolname[ZFS_MAX_DATASET_NAME_LEN];
10128 } ev_opts_t;
10129
10130 static void
10131 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
10132 {
10133 char ctime_str[26], str[32];
10134 const char *ptr;
10135 int64_t *tv;
10136 uint_t n;
10137
10138 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
10139 memset(str, ' ', 32);
10140 (void) ctime_r((const time_t *)&tv[0], ctime_str);
10141 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
10142 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
10143 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
10144 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
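/*
 * Worked example of the slicing above, assuming the standard ctime_r()
 * layout "Wed Jun 30 21:49:08 1993\n": offsets 4, 20 and 11 pick out
 * "Jun 30", "1993" and "21:49:08", which land at str offsets 0, 7 and 12,
 * and the fractional seconds are appended at offset 20, yielding e.g.
 * "Jun 30 1993 21:49:08.123456789".
 */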
10145 if (opts->scripted)
10146 (void) printf(gettext("%s\t"), str);
10147 else
10148 (void) printf(gettext("%s "), str);
10149
10150 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
10151 (void) printf(gettext("%s\n"), ptr);
10152 }
10153
10154 static void
10155 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
10156 {
10157 nvpair_t *nvp;
10158
10159 for (nvp = nvlist_next_nvpair(nvl, NULL);
10160 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
10161
10162 data_type_t type = nvpair_type(nvp);
10163 const char *name = nvpair_name(nvp);
10164
10165 boolean_t b;
10166 uint8_t i8;
10167 uint16_t i16;
10168 uint32_t i32;
10169 uint64_t i64;
10170 const char *str;
10171 nvlist_t *cnv;
10172
10173 printf(gettext("%*s%s = "), depth, "", name);
10174
10175 switch (type) {
10176 case DATA_TYPE_BOOLEAN:
10177 printf(gettext("%s"), "1");
10178 break;
10179
10180 case DATA_TYPE_BOOLEAN_VALUE:
10181 (void) nvpair_value_boolean_value(nvp, &b);
10182 printf(gettext("%s"), b ? "1" : "0");
10183 break;
10184
10185 case DATA_TYPE_BYTE:
10186 (void) nvpair_value_byte(nvp, &i8);
10187 printf(gettext("0x%x"), i8);
10188 break;
10189
10190 case DATA_TYPE_INT8:
10191 (void) nvpair_value_int8(nvp, (void *)&i8);
10192 printf(gettext("0x%x"), i8);
10193 break;
10194
10195 case DATA_TYPE_UINT8:
10196 (void) nvpair_value_uint8(nvp, &i8);
10197 printf(gettext("0x%x"), i8);
10198 break;
10199
10200 case DATA_TYPE_INT16:
10201 (void) nvpair_value_int16(nvp, (void *)&i16);
10202 printf(gettext("0x%x"), i16);
10203 break;
10204
10205 case DATA_TYPE_UINT16:
10206 (void) nvpair_value_uint16(nvp, &i16);
10207 printf(gettext("0x%x"), i16);
10208 break;
10209
10210 case DATA_TYPE_INT32:
10211 (void) nvpair_value_int32(nvp, (void *)&i32);
10212 printf(gettext("0x%x"), i32);
10213 break;
10214
10215 case DATA_TYPE_UINT32:
10216 (void) nvpair_value_uint32(nvp, &i32);
10217 printf(gettext("0x%x"), i32);
10218 break;
10219
10220 case DATA_TYPE_INT64:
10221 (void) nvpair_value_int64(nvp, (void *)&i64);
10222 printf(gettext("0x%llx"), (u_longlong_t)i64);
10223 break;
10224
10225 case DATA_TYPE_UINT64:
10226 (void) nvpair_value_uint64(nvp, &i64);
10227 /*
10228 * translate vdev state values to readable
10229 * strings to aid zpool events consumers
10230 */
10231 if (strcmp(name,
10232 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
10233 strcmp(name,
10234 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
10235 printf(gettext("\"%s\" (0x%llx)"),
10236 zpool_state_to_name(i64, VDEV_AUX_NONE),
10237 (u_longlong_t)i64);
10238 } else {
10239 printf(gettext("0x%llx"), (u_longlong_t)i64);
10240 }
10241 break;
10242
10243 case DATA_TYPE_HRTIME:
10244 (void) nvpair_value_hrtime(nvp, (void *)&i64);
10245 printf(gettext("0x%llx"), (u_longlong_t)i64);
10246 break;
10247
10248 case DATA_TYPE_STRING:
10249 (void) nvpair_value_string(nvp, &str);
10250 printf(gettext("\"%s\""), str ? str : "<NULL>");
10251 break;
10252
10253 case DATA_TYPE_NVLIST:
10254 printf(gettext("(embedded nvlist)\n"));
10255 (void) nvpair_value_nvlist(nvp, &cnv);
10256 zpool_do_events_nvprint(cnv, depth + 8);
10257 printf(gettext("%*s(end %s)"), depth, "", name);
10258 break;
10259
10260 case DATA_TYPE_NVLIST_ARRAY: {
10261 nvlist_t **val;
10262 uint_t i, nelem;
10263
10264 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
10265 printf(gettext("(%d embedded nvlists)\n"), nelem);
10266 for (i = 0; i < nelem; i++) {
10267 printf(gettext("%*s%s[%d] = %s\n"),
10268 depth, "", name, i, "(embedded nvlist)");
10269 zpool_do_events_nvprint(val[i], depth + 8);
10270 printf(gettext("%*s(end %s[%i])\n"),
10271 depth, "", name, i);
10272 }
10273 printf(gettext("%*s(end %s)\n"), depth, "", name);
10274 }
10275 break;
10276
10277 case DATA_TYPE_INT8_ARRAY: {
10278 int8_t *val;
10279 uint_t i, nelem;
10280
10281 (void) nvpair_value_int8_array(nvp, &val, &nelem);
10282 for (i = 0; i < nelem; i++)
10283 printf(gettext("0x%x "), val[i]);
10284
10285 break;
10286 }
10287
10288 case DATA_TYPE_UINT8_ARRAY: {
10289 uint8_t *val;
10290 uint_t i, nelem;
10291
10292 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
10293 for (i = 0; i < nelem; i++)
10294 printf(gettext("0x%x "), val[i]);
10295
10296 break;
10297 }
10298
10299 case DATA_TYPE_INT16_ARRAY: {
10300 int16_t *val;
10301 uint_t i, nelem;
10302
10303 (void) nvpair_value_int16_array(nvp, &val, &nelem);
10304 for (i = 0; i < nelem; i++)
10305 printf(gettext("0x%x "), val[i]);
10306
10307 break;
10308 }
10309
10310 case DATA_TYPE_UINT16_ARRAY: {
10311 uint16_t *val;
10312 uint_t i, nelem;
10313
10314 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
10315 for (i = 0; i < nelem; i++)
10316 printf(gettext("0x%x "), val[i]);
10317
10318 break;
10319 }
10320
10321 case DATA_TYPE_INT32_ARRAY: {
10322 int32_t *val;
10323 uint_t i, nelem;
10324
10325 (void) nvpair_value_int32_array(nvp, &val, &nelem);
10326 for (i = 0; i < nelem; i++)
10327 printf(gettext("0x%x "), val[i]);
10328
10329 break;
10330 }
10331
10332 case DATA_TYPE_UINT32_ARRAY: {
10333 uint32_t *val;
10334 uint_t i, nelem;
10335
10336 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
10337 for (i = 0; i < nelem; i++)
10338 printf(gettext("0x%x "), val[i]);
10339
10340 break;
10341 }
10342
10343 case DATA_TYPE_INT64_ARRAY: {
10344 int64_t *val;
10345 uint_t i, nelem;
10346
10347 (void) nvpair_value_int64_array(nvp, &val, &nelem);
10348 for (i = 0; i < nelem; i++)
10349 printf(gettext("0x%llx "),
10350 (u_longlong_t)val[i]);
10351
10352 break;
10353 }
10354
10355 case DATA_TYPE_UINT64_ARRAY: {
10356 uint64_t *val;
10357 uint_t i, nelem;
10358
10359 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
10360 for (i = 0; i < nelem; i++)
10361 printf(gettext("0x%llx "),
10362 (u_longlong_t)val[i]);
10363
10364 break;
10365 }
10366
10367 case DATA_TYPE_STRING_ARRAY: {
10368 const char **str;
10369 uint_t i, nelem;
10370
10371 (void) nvpair_value_string_array(nvp, &str, &nelem);
10372 for (i = 0; i < nelem; i++)
10373 printf(gettext("\"%s\" "),
10374 str[i] ? str[i] : "<NULL>");
10375
10376 break;
10377 }
10378
10379 case DATA_TYPE_BOOLEAN_ARRAY:
10380 case DATA_TYPE_BYTE_ARRAY:
10381 case DATA_TYPE_DOUBLE:
10382 case DATA_TYPE_DONTCARE:
10383 case DATA_TYPE_UNKNOWN:
10384 printf(gettext("<unknown>"));
10385 break;
10386 }
10387
10388 printf(gettext("\n"));
10389 }
10390 }
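/*
 * Sketch of the output produced above for a hypothetical event (field
 * names and values are illustrative only; indentation grows by 8 for each
 * embedded nvlist):
 *
 *         class = "sysevent.fs.zfs.config_sync"
 *         pool = "tank"
 *         vdev_tree = (embedded nvlist)
 *                 type = "mirror"
 *         (end vdev_tree)
 */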
10391
10392 static int
10393 zpool_do_events_next(ev_opts_t *opts)
10394 {
10395 nvlist_t *nvl;
10396 int zevent_fd, ret, dropped;
10397 const char *pool;
10398
10399 zevent_fd = open(ZFS_DEV, O_RDWR);
10400 VERIFY(zevent_fd >= 0);
10401
10402 if (!opts->scripted)
10403 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
10404
10405 while (1) {
10406 ret = zpool_events_next(g_zfs, &nvl, &dropped,
10407 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
10408 if (ret || nvl == NULL)
10409 break;
10410
10411 if (dropped > 0)
10412 (void) printf(gettext("dropped %d events\n"), dropped);
10413
10414 if (strlen(opts->poolname) > 0 &&
10415 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
10416 strcmp(opts->poolname, pool) != 0)
10417 continue;
10418
10419 zpool_do_events_short(nvl, opts);
10420
10421 if (opts->verbose) {
10422 zpool_do_events_nvprint(nvl, 8);
10423 printf(gettext("\n"));
10424 }
10425 (void) fflush(stdout);
10426
10427 nvlist_free(nvl);
10428 }
10429
10430 VERIFY(0 == close(zevent_fd));
10431
10432 return (ret);
10433 }
10434
10435 static int
10436 zpool_do_events_clear(void)
10437 {
10438 int count, ret;
10439
10440 ret = zpool_events_clear(g_zfs, &count);
10441 if (!ret)
10442 (void) printf(gettext("cleared %d events\n"), count);
10443
10444 return (ret);
10445 }
10446
10447 /*
10448 * zpool events [-vHf [pool] | -c]
10449 *
10450 * Displays events logged by ZFS.
10451 */
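/*
 * Illustrative invocations (the pool name "tank" is hypothetical):
 *
 *   # zpool events               <- print queued events and exit
 *   # zpool events -v tank       <- include full nvlist payloads, one pool
 *   # zpool events -f            <- follow mode, block waiting for new events
 *   # zpool events -c            <- clear the event queue
 */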
10452 int
10453 zpool_do_events(int argc, char **argv)
10454 {
10455 ev_opts_t opts = { 0 };
10456 int ret;
10457 int c;
10458
10459 /* check options */
10460 while ((c = getopt(argc, argv, "vHfc")) != -1) {
10461 switch (c) {
10462 case 'v':
10463 opts.verbose = 1;
10464 break;
10465 case 'H':
10466 opts.scripted = 1;
10467 break;
10468 case 'f':
10469 opts.follow = 1;
10470 break;
10471 case 'c':
10472 opts.clear = 1;
10473 break;
10474 case '?':
10475 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10476 optopt);
10477 usage(B_FALSE);
10478 }
10479 }
10480 argc -= optind;
10481 argv += optind;
10482
10483 if (argc > 1) {
10484 (void) fprintf(stderr, gettext("too many arguments\n"));
10485 usage(B_FALSE);
10486 } else if (argc == 1) {
10487 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
10488 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
10489 (void) fprintf(stderr,
10490 gettext("invalid pool name '%s'\n"), opts.poolname);
10491 usage(B_FALSE);
10492 }
10493 }
10494
10495 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
10496 opts.clear) {
10497 (void) fprintf(stderr,
10498 gettext("invalid options combined with -c\n"));
10499 usage(B_FALSE);
10500 }
10501
10502 if (opts.clear)
10503 ret = zpool_do_events_clear();
10504 else
10505 ret = zpool_do_events_next(&opts);
10506
10507 return (ret);
10508 }
10509
10510 static int
10511 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
10512 {
10513 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10514 char value[ZFS_MAXPROPLEN];
10515 zprop_source_t srctype;
10516
10517 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
10518 pl = pl->pl_next) {
10519 char *prop_name;
10520 /*
10521 * If the first property is pool name, it is a special
10522 * placeholder that we can skip. This will also skip
10523 * over the name property when 'all' is specified.
10524 */
10525 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10526 pl == cbp->cb_proplist)
10527 continue;
10528
10529 if (pl->pl_prop == ZPROP_INVAL) {
10530 prop_name = pl->pl_user_prop;
10531 } else {
10532 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
10533 }
10534 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
10535 prop_name, value, sizeof (value), &srctype,
10536 cbp->cb_literal) == 0) {
10537 zprop_print_one_property(vdevname, cbp, prop_name,
10538 value, srctype, NULL, NULL);
10539 }
10540 }
10541
10542 return (0);
10543 }
10544
10545 static int
10546 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
10547 {
10548 zpool_handle_t *zhp = zhp_data;
10549 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10550 char *vdevname;
10551 const char *type;
10552 int ret;
10553
10554 /*
10555 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
10556 * pool name for display purposes, which is not desired here. Fall back to
10557 * zpool_vdev_name() when not dealing with the root vdev.
10558 */
10559 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
10560 if (zhp != NULL && strcmp(type, "root") == 0)
10561 vdevname = strdup("root-0");
10562 else
10563 vdevname = zpool_vdev_name(g_zfs, zhp, nv,
10564 cbp->cb_vdevs.cb_name_flags);
10565
10566 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
10567
10568 ret = get_callback_vdev(zhp, vdevname, data);
10569
10570 free(vdevname);
10571
10572 return (ret);
10573 }
10574
10575 static int
10576 get_callback(zpool_handle_t *zhp, void *data)
10577 {
10578 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
10579 char value[ZFS_MAXPROPLEN];
10580 zprop_source_t srctype;
10581 zprop_list_t *pl;
10582 int vid;
10583
10584 if (cbp->cb_type == ZFS_TYPE_VDEV) {
10585 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
10586 for_each_vdev(zhp, get_callback_vdev_cb, data);
10587 } else {
10588 /* Adjust column widths for vdev properties */
10589 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10590 vid++) {
10591 vdev_expand_proplist(zhp,
10592 cbp->cb_vdevs.cb_names[vid],
10593 &cbp->cb_proplist);
10594 }
10595 /* Display the properties */
10596 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
10597 vid++) {
10598 get_callback_vdev(zhp,
10599 cbp->cb_vdevs.cb_names[vid], data);
10600 }
10601 }
10602 } else {
10603 assert(cbp->cb_type == ZFS_TYPE_POOL);
10604 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
10605 /*
10606 * Skip the special fake placeholder. This will also
10607 * skip over the name property when 'all' is specified.
10608 */
10609 if (pl->pl_prop == ZPOOL_PROP_NAME &&
10610 pl == cbp->cb_proplist)
10611 continue;
10612
10613 if (pl->pl_prop == ZPROP_INVAL &&
10614 zfs_prop_user(pl->pl_user_prop)) {
10615 srctype = ZPROP_SRC_LOCAL;
10616
10617 if (zpool_get_userprop(zhp, pl->pl_user_prop,
10618 value, sizeof (value), &srctype) != 0)
10619 continue;
10620
10621 zprop_print_one_property(zpool_get_name(zhp),
10622 cbp, pl->pl_user_prop, value, srctype,
10623 NULL, NULL);
10624 } else if (pl->pl_prop == ZPROP_INVAL &&
10625 (zpool_prop_feature(pl->pl_user_prop) ||
10626 zpool_prop_unsupported(pl->pl_user_prop))) {
10627 srctype = ZPROP_SRC_LOCAL;
10628
10629 if (zpool_prop_get_feature(zhp,
10630 pl->pl_user_prop, value,
10631 sizeof (value)) == 0) {
10632 zprop_print_one_property(
10633 zpool_get_name(zhp), cbp,
10634 pl->pl_user_prop, value, srctype,
10635 NULL, NULL);
10636 }
10637 } else {
10638 if (zpool_get_prop(zhp, pl->pl_prop, value,
10639 sizeof (value), &srctype,
10640 cbp->cb_literal) != 0)
10641 continue;
10642
10643 zprop_print_one_property(zpool_get_name(zhp),
10644 cbp, zpool_prop_to_name(pl->pl_prop),
10645 value, srctype, NULL, NULL);
10646 }
10647 }
10648 }
10649
10650 return (0);
10651 }
10652
10653 /*
10654 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10655 *
10656 * -H Scripted mode. Don't display headers, and separate properties
10657 * by a single tab.
10658 * -o List of columns to display. Defaults to
10659 * "name,property,value,source".
10660 * -p Display values in parsable (exact) format.
10661 *
10662 * Get the requested properties for the specified pools (or, when vdev
10663 * names are given, for those vdevs) and print one property per line.
10664 */
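/*
 * Illustrative invocations (pool and vdev names are hypothetical):
 *
 *   # zpool get all tank                <- every pool property
 *   # zpool get -Hp size,free tank      <- scripted, parsable values
 *   # zpool get all tank all-vdevs      <- properties of every vdev in tank
 *   # zpool get all tank mirror-0       <- properties of a single vdev
 */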
10665 int
10666 zpool_do_get(int argc, char **argv)
10667 {
10668 zprop_get_cbdata_t cb = { 0 };
10669 zprop_list_t fake_name = { 0 };
10670 int ret;
10671 int c, i;
10672 char *propstr = NULL;
10673 char *vdev = NULL;
10674
10675 cb.cb_first = B_TRUE;
10676
10677 /*
10678 * Set up default columns and sources.
10679 */
10680 cb.cb_sources = ZPROP_SRC_ALL;
10681 cb.cb_columns[0] = GET_COL_NAME;
10682 cb.cb_columns[1] = GET_COL_PROPERTY;
10683 cb.cb_columns[2] = GET_COL_VALUE;
10684 cb.cb_columns[3] = GET_COL_SOURCE;
10685 cb.cb_type = ZFS_TYPE_POOL;
10686 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10687 current_prop_type = cb.cb_type;
10688
10689 /* check options */
10690 while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10691 switch (c) {
10692 case 'p':
10693 cb.cb_literal = B_TRUE;
10694 break;
10695 case 'H':
10696 cb.cb_scripted = B_TRUE;
10697 break;
10698 case 'o':
10699 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
10700 i = 0;
10701
10702 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10703 static const char *const col_opts[] =
10704 { "name", "property", "value", "source",
10705 "all" };
10706 static const zfs_get_column_t col_cols[] =
10707 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
10708 GET_COL_SOURCE };
10709
10710 if (i == ZFS_GET_NCOLS - 1) {
10711 (void) fprintf(stderr, gettext("too "
10712 "many fields given to -o "
10713 "option\n"));
10714 usage(B_FALSE);
10715 }
10716
10717 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
10718 if (strcmp(tok, col_opts[c]) == 0)
10719 goto found;
10720
10721 (void) fprintf(stderr,
10722 gettext("invalid column name '%s'\n"), tok);
10723 usage(B_FALSE);
10724
10725 found:
10726 if (c >= 4) {
10727 if (i > 0) {
10728 (void) fprintf(stderr,
10729 gettext("\"all\" conflicts "
10730 "with specific fields "
10731 "given to -o option\n"));
10732 usage(B_FALSE);
10733 }
10734
10735 memcpy(cb.cb_columns, col_cols,
10736 sizeof (col_cols));
10737 i = ZFS_GET_NCOLS - 1;
10738 } else
10739 cb.cb_columns[i++] = col_cols[c];
10740 }
10741 break;
10742 case '?':
10743 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10744 optopt);
10745 usage(B_FALSE);
10746 }
10747 }
10748
10749 argc -= optind;
10750 argv += optind;
10751
10752 if (argc < 1) {
10753 (void) fprintf(stderr, gettext("missing property "
10754 "argument\n"));
10755 usage(B_FALSE);
10756 }
10757
10758 /* Properties list is needed later by zprop_get_list() */
10759 propstr = argv[0];
10760
10761 argc--;
10762 argv++;
10763
10764 if (argc == 0) {
10765 /* No args, so just print the defaults. */
10766 } else if (are_all_pools(argc, argv)) {
10767 /* All the args are pool names */
10768 } else if (are_all_pools(1, argv)) {
10769 /* The first arg is a pool name */
10770 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10771 (argc == 2 && strcmp(argv[1], "root") == 0) ||
10772 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10773 &cb.cb_vdevs)) {
10774
10775 if (strcmp(argv[1], "root") == 0)
10776 vdev = strdup("root-0");
10777 else
10778 vdev = strdup(argv[1]);
10779
10780 /* ... and the rest are vdev names */
10781 cb.cb_vdevs.cb_names = &vdev;
10782 cb.cb_vdevs.cb_names_count = argc - 1;
10783 cb.cb_type = ZFS_TYPE_VDEV;
10784 argc = 1; /* One pool to process */
10785 } else {
10786 fprintf(stderr, gettext("Expected a list of vdevs in"
10787 " \"%s\", but got:\n"), argv[0]);
10788 error_list_unresolved_vdevs(argc - 1, argv + 1,
10789 argv[0], &cb.cb_vdevs);
10790 fprintf(stderr, "\n");
10791 usage(B_FALSE);
10792 return (1);
10793 }
10794 } else {
10795 /*
10796 * The first arg isn't a pool name.
10797 */
10798 fprintf(stderr, gettext("missing pool name.\n"));
10799 fprintf(stderr, "\n");
10800 usage(B_FALSE);
10801 return (1);
10802 }
10803
10804 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10805 cb.cb_type) != 0) {
10806 /* Use correct list of valid properties (pool or vdev) */
10807 current_prop_type = cb.cb_type;
10808 usage(B_FALSE);
10809 }
10810
10811 if (cb.cb_proplist != NULL) {
10812 fake_name.pl_prop = ZPOOL_PROP_NAME;
10813 fake_name.pl_width = strlen(gettext("NAME"));
10814 fake_name.pl_next = cb.cb_proplist;
10815 cb.cb_proplist = &fake_name;
10816 }
10817
10818 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10819 cb.cb_literal, get_callback, &cb);
10820
10821 if (cb.cb_proplist == &fake_name)
10822 zprop_free_list(fake_name.pl_next);
10823 else
10824 zprop_free_list(cb.cb_proplist);
10825
10826 if (vdev != NULL)
10827 free(vdev);
10828
10829 return (ret);
10830 }
10831
10832 typedef struct set_cbdata {
10833 char *cb_propname;
10834 char *cb_value;
10835 zfs_type_t cb_type;
10836 vdev_cbdata_t cb_vdevs;
10837 boolean_t cb_any_successful;
10838 } set_cbdata_t;
10839
10840 static int
10841 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10842 {
10843 int error;
10844
10845 /* Check if we have out-of-bounds features */
10846 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10847 boolean_t features[SPA_FEATURES];
10848 if (zpool_do_load_compat(cb->cb_value, features) !=
10849 ZPOOL_COMPATIBILITY_OK)
10850 return (-1);
10851
10852 nvlist_t *enabled = zpool_get_features(zhp);
10853 spa_feature_t i;
10854 for (i = 0; i < SPA_FEATURES; i++) {
10855 const char *fguid = spa_feature_table[i].fi_guid;
10856 if (nvlist_exists(enabled, fguid) && !features[i])
10857 break;
10858 }
10859 if (i < SPA_FEATURES)
10860 (void) fprintf(stderr, gettext("Warning: one or "
10861 "more features already enabled on pool '%s'\n"
10862 "are not present in this compatibility set.\n"),
10863 zpool_get_name(zhp));
10864 }
10865
10866 /* if we're setting a feature, check it's in the compatibility set */
10867 if (zpool_prop_feature(cb->cb_propname) &&
10868 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10869 char *fname = strchr(cb->cb_propname, '@') + 1;
10870 spa_feature_t f;
10871
10872 if (zfeature_lookup_name(fname, &f) == 0) {
10873 char compat[ZFS_MAXPROPLEN];
10874 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10875 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10876 compat[0] = '\0';
10877
10878 boolean_t features[SPA_FEATURES];
10879 if (zpool_do_load_compat(compat, features) !=
10880 ZPOOL_COMPATIBILITY_OK) {
10881 (void) fprintf(stderr, gettext("Error: "
10882 "cannot enable feature '%s' on pool '%s'\n"
10883 "because the pool's 'compatibility' "
10884 "property cannot be parsed.\n"),
10885 fname, zpool_get_name(zhp));
10886 return (-1);
10887 }
10888
10889 if (!features[f]) {
10890 (void) fprintf(stderr, gettext("Error: "
10891 "cannot enable feature '%s' on pool '%s'\n"
10892 "as it is not specified in this pool's "
10893 "current compatibility set.\n"
10894 "Consider setting 'compatibility' to a "
10895 "less restrictive set, or to 'off'.\n"),
10896 fname, zpool_get_name(zhp));
10897 return (-1);
10898 }
10899 }
10900 }
10901
10902 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10903
10904 return (error);
10905 }
10906
10907 static int
10908 set_callback(zpool_handle_t *zhp, void *data)
10909 {
10910 int error;
10911 set_cbdata_t *cb = (set_cbdata_t *)data;
10912
10913 if (cb->cb_type == ZFS_TYPE_VDEV) {
10914 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10915 cb->cb_propname, cb->cb_value);
10916 } else {
10917 assert(cb->cb_type == ZFS_TYPE_POOL);
10918 error = set_pool_callback(zhp, cb);
10919 }
10920
10921 cb->cb_any_successful = !error;
10922 return (error);
10923 }
10924
10925 int
10926 zpool_do_set(int argc, char **argv)
10927 {
10928 set_cbdata_t cb = { 0 };
10929 int error;
10930 char *vdev = NULL;
10931
10932 current_prop_type = ZFS_TYPE_POOL;
10933 if (argc > 1 && argv[1][0] == '-') {
10934 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10935 argv[1][1]);
10936 usage(B_FALSE);
10937 }
10938
10939 if (argc < 2) {
10940 (void) fprintf(stderr, gettext("missing property=value "
10941 "argument\n"));
10942 usage(B_FALSE);
10943 }
10944
10945 if (argc < 3) {
10946 (void) fprintf(stderr, gettext("missing pool name\n"));
10947 usage(B_FALSE);
10948 }
10949
10950 if (argc > 4) {
10951 (void) fprintf(stderr, gettext("too many pool names\n"));
10952 usage(B_FALSE);
10953 }
10954
10955 cb.cb_propname = argv[1];
10956 cb.cb_type = ZFS_TYPE_POOL;
10957 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10958 cb.cb_value = strchr(cb.cb_propname, '=');
10959 if (cb.cb_value == NULL) {
10960 (void) fprintf(stderr, gettext("missing value in "
10961 "property=value argument\n"));
10962 usage(B_FALSE);
10963 }
10964
10965 *(cb.cb_value) = '\0';
10966 cb.cb_value++;
10967 argc -= 2;
10968 argv += 2;
10969
10970 /* argv[0] is pool name */
10971 if (!is_pool(argv[0])) {
10972 (void) fprintf(stderr,
10973 gettext("cannot open '%s': is not a pool\n"), argv[0]);
10974 return (EINVAL);
10975 }
10976
10977 /* argv[1], when supplied, is vdev name */
10978 if (argc == 2) {
10979
10980 if (strcmp(argv[1], "root") == 0)
10981 vdev = strdup("root-0");
10982 else
10983 vdev = strdup(argv[1]);
10984
10985 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
10986 (void) fprintf(stderr, gettext(
10987 "cannot find '%s' in '%s': device not in pool\n"),
10988 vdev, argv[0]);
10989 free(vdev);
10990 return (EINVAL);
10991 }
10992 cb.cb_vdevs.cb_names = &vdev;
10993 cb.cb_vdevs.cb_names_count = 1;
10994 cb.cb_type = ZFS_TYPE_VDEV;
10995 }
10996
10997 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10998 B_FALSE, set_callback, &cb);
10999
11000 if (vdev != NULL)
11001 free(vdev);
11002
11003 return (error);
11004 }
11005
11006 /* Add up the total number of bytes left to initialize/trim across all vdevs */
11007 static uint64_t
11008 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
11009 {
11010 uint64_t bytes_remaining;
11011 nvlist_t **child;
11012 uint_t c, children;
11013 vdev_stat_t *vs;
11014
11015 assert(activity == ZPOOL_WAIT_INITIALIZE ||
11016 activity == ZPOOL_WAIT_TRIM);
11017
11018 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
11019 (uint64_t **)&vs, &c) == 0);
11020
11021 if (activity == ZPOOL_WAIT_INITIALIZE &&
11022 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
11023 bytes_remaining = vs->vs_initialize_bytes_est -
11024 vs->vs_initialize_bytes_done;
11025 else if (activity == ZPOOL_WAIT_TRIM &&
11026 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
11027 bytes_remaining = vs->vs_trim_bytes_est -
11028 vs->vs_trim_bytes_done;
11029 else
11030 bytes_remaining = 0;
11031
11032 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11033 &child, &children) != 0)
11034 children = 0;
11035
11036 for (c = 0; c < children; c++)
11037 bytes_remaining += vdev_activity_remaining(child[c], activity);
11038
11039 return (bytes_remaining);
11040 }
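/*
 * For example, two leaf vdevs that each report vs_initialize_bytes_est of
 * 1T and vs_initialize_bytes_done of 400G contribute 600G apiece, so the
 * recursion above returns roughly 1.2T (numbers are illustrative only).
 */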
11041
11042 /* Add up the total number of bytes left to rebuild across top-level vdevs */
11043 static uint64_t
11044 vdev_activity_top_remaining(nvlist_t *nv)
11045 {
11046 uint64_t bytes_remaining = 0;
11047 nvlist_t **child;
11048 uint_t children;
11049 int error;
11050
11051 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11052 &child, &children) != 0)
11053 children = 0;
11054
11055 for (uint_t c = 0; c < children; c++) {
11056 vdev_rebuild_stat_t *vrs;
11057 uint_t i;
11058
11059 error = nvlist_lookup_uint64_array(child[c],
11060 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
11061 if (error == 0) {
11062 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
11063 bytes_remaining += (vrs->vrs_bytes_est -
11064 vrs->vrs_bytes_rebuilt);
11065 }
11066 }
11067 }
11068
11069 return (bytes_remaining);
11070 }
11071
11072 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
11073 static boolean_t
11074 vdev_any_spare_replacing(nvlist_t *nv)
11075 {
11076 nvlist_t **child;
11077 uint_t c, children;
11078 const char *vdev_type;
11079
11080 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
11081
11082 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
11083 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
11084 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
11085 return (B_TRUE);
11086 }
11087
11088 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
11089 &child, &children) != 0)
11090 children = 0;
11091
11092 for (c = 0; c < children; c++) {
11093 if (vdev_any_spare_replacing(child[c]))
11094 return (B_TRUE);
11095 }
11096
11097 return (B_FALSE);
11098 }
11099
11100 typedef struct wait_data {
11101 char *wd_poolname;
11102 boolean_t wd_scripted;
11103 boolean_t wd_exact;
11104 boolean_t wd_headers_once;
11105 boolean_t wd_should_exit;
11106 /* Which activities to wait for */
11107 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
11108 float wd_interval;
11109 pthread_cond_t wd_cv;
11110 pthread_mutex_t wd_mutex;
11111 } wait_data_t;
11112
11113 /*
11114 * Print to stdout a single line, containing one column for each activity that
11115 * we are waiting for, specifying how many bytes of work are left for that
11116 * activity.
11117 */
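/*
 * A sketch of the resulting row (values hypothetical; only the columns
 * selected via wd_enabled are shown, right-aligned under their headers):
 *
 *   DISCARD    FREE  INITIALIZE  ...
 *         0    1.2G        512M
 */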
11118 static void
11119 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
11120 {
11121 nvlist_t *config, *nvroot;
11122 uint_t c;
11123 int i;
11124 pool_checkpoint_stat_t *pcs = NULL;
11125 pool_scan_stat_t *pss = NULL;
11126 pool_removal_stat_t *prs = NULL;
11127 pool_raidz_expand_stat_t *pres = NULL;
11128 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
11129 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
11130 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
11131
11132 /* Calculate the width of each column */
11133 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11134 /*
11135 * Make sure we have enough space in the col for pretty-printed
11136 * numbers and for the column header, and then leave a couple
11137 * spaces between cols for readability.
11138 */
11139 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
11140 }
11141
11142 if (timestamp_fmt != NODATE)
11143 print_timestamp(timestamp_fmt);
11144
11145 /* Print header if appropriate */
11146 int term_height = terminal_height();
11147 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
11148 row % (term_height-1) == 0);
11149 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
11150 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11151 if (wd->wd_enabled[i])
11152 (void) printf("%*s", col_widths[i], headers[i]);
11153 }
11154 (void) fputc('\n', stdout);
11155 }
11156
11157 /* Bytes of work remaining in each activity */
11158 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
11159
11160 bytes_rem[ZPOOL_WAIT_FREE] =
11161 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
11162
11163 config = zpool_get_config(zhp, NULL);
11164 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
11165
11166 (void) nvlist_lookup_uint64_array(nvroot,
11167 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
11168 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
11169 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
11170
11171 (void) nvlist_lookup_uint64_array(nvroot,
11172 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
11173 if (prs != NULL && prs->prs_state == DSS_SCANNING)
11174 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
11175 prs->prs_copied;
11176
11177 (void) nvlist_lookup_uint64_array(nvroot,
11178 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
11179 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
11180 pss->pss_pass_scrub_pause == 0) {
11181 int64_t rem = pss->pss_to_examine - pss->pss_issued;
11182 if (pss->pss_func == POOL_SCAN_SCRUB)
11183 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
11184 else
11185 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
11186 } else if (check_rebuilding(nvroot, NULL)) {
11187 bytes_rem[ZPOOL_WAIT_RESILVER] =
11188 vdev_activity_top_remaining(nvroot);
11189 }
11190
11191 (void) nvlist_lookup_uint64_array(nvroot,
11192 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
11193 if (pres != NULL && pres->pres_state == DSS_SCANNING) {
11194 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
11195 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
11196 }
11197
11198 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
11199 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
11200 bytes_rem[ZPOOL_WAIT_TRIM] =
11201 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
11202
11203 /*
11204 * A replace finishes after resilvering finishes, so the amount of work
11205 * left for a replace is the same as for resilvering.
11206 *
11207 * It isn't quite correct to say that if we have any 'spare' or
11208 * 'replacing' vdevs and a resilver is happening, then a replace is in
11209 * progress, like we do here. When a hot spare is used, the faulted vdev
11210 * is not removed after the hot spare is resilvered, so parent 'spare'
11211 * vdev is not removed either. So we could have a 'spare' vdev, but be
11212 * resilvering for a different reason. However, we use it as a heuristic
11213 * because we don't have access to the DTLs, which could tell us whether
11214 * or not we have really finished resilvering a hot spare.
11215 */
11216 if (vdev_any_spare_replacing(nvroot))
11217 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
11218
11219 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11220 char buf[64];
11221 if (!wd->wd_enabled[i])
11222 continue;
11223
11224 if (wd->wd_exact) {
11225 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
11226 bytes_rem[i]);
11227 } else {
11228 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
11229 }
11230
11231 if (wd->wd_scripted)
11232 (void) printf(i == 0 ? "%s" : "\t%s", buf);
11233 else
11234 (void) printf(" %*s", col_widths[i] - 1, buf);
11235 }
11236 (void) printf("\n");
11237 (void) fflush(stdout);
11238 }
11239
11240 static void *
11241 wait_status_thread(void *arg)
11242 {
11243 wait_data_t *wd = (wait_data_t *)arg;
11244 zpool_handle_t *zhp;
11245
11246 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
11247 return (void *)(1);
11248
11249 for (int row = 0; ; row++) {
11250 boolean_t missing;
11251 struct timespec timeout;
11252 int ret = 0;
11253 (void) clock_gettime(CLOCK_REALTIME, &timeout);
11254
11255 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
11256 zpool_props_refresh(zhp) != 0) {
11257 zpool_close(zhp);
11258 return (void *)(uintptr_t)(missing ? 0 : 1);
11259 }
11260
11261 print_wait_status_row(wd, zhp, row);
11262
11263 timeout.tv_sec += floor(wd->wd_interval);
11264 long nanos = timeout.tv_nsec +
11265 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
11266 if (nanos >= NANOSEC) {
11267 timeout.tv_sec++;
11268 timeout.tv_nsec = nanos - NANOSEC;
11269 } else {
11270 timeout.tv_nsec = nanos;
11271 }
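/*
 * Worked example of the carry above: with wd_interval == 2.5 and an
 * initial tv_nsec of 800000000, two whole seconds are added first, nanos
 * becomes 1300000000 (>= NANOSEC), so one extra second is carried and
 * tv_nsec ends up at 300000000.
 */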
11272 pthread_mutex_lock(&wd->wd_mutex);
11273 if (!wd->wd_should_exit)
11274 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
11275 &timeout);
11276 pthread_mutex_unlock(&wd->wd_mutex);
11277 if (ret == 0) {
11278 break; /* signaled by main thread */
11279 } else if (ret != ETIMEDOUT) {
11280 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
11281 "failed: %s\n"), strerror(ret));
11282 zpool_close(zhp);
11283 return (void *)(uintptr_t)(1);
11284 }
11285 }
11286
11287 zpool_close(zhp);
11288 return (void *)(0);
11289 }
11290
11291 int
11292 zpool_do_wait(int argc, char **argv)
11293 {
11294 boolean_t verbose = B_FALSE;
11295 int c, i;
11296 unsigned long count;
11297 pthread_t status_thr;
11298 int error = 0;
11299 zpool_handle_t *zhp;
11300
11301 wait_data_t wd;
11302 wd.wd_scripted = B_FALSE;
11303 wd.wd_exact = B_FALSE;
11304 wd.wd_headers_once = B_FALSE;
11305 wd.wd_should_exit = B_FALSE;
11306
11307 pthread_mutex_init(&wd.wd_mutex, NULL);
11308 pthread_cond_init(&wd.wd_cv, NULL);
11309
11310 /* By default, wait for all types of activity. */
11311 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
11312 wd.wd_enabled[i] = B_TRUE;
11313
11314 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
11315 switch (c) {
11316 case 'H':
11317 wd.wd_scripted = B_TRUE;
11318 break;
11319 case 'n':
11320 wd.wd_headers_once = B_TRUE;
11321 break;
11322 case 'p':
11323 wd.wd_exact = B_TRUE;
11324 break;
11325 case 'T':
11326 get_timestamp_arg(*optarg);
11327 break;
11328 case 't':
11329 /* Reset activities array */
11330 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
11331
11332 for (char *tok; (tok = strsep(&optarg, ",")); ) {
11333 static const char *const col_opts[] = {
11334 "discard", "free", "initialize", "replace",
11335 "remove", "resilver", "scrub", "trim",
11336 "raidz_expand" };
11337
11338 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
11339 if (strcmp(tok, col_opts[i]) == 0) {
11340 wd.wd_enabled[i] = B_TRUE;
11341 goto found;
11342 }
11343
11344 (void) fprintf(stderr,
11345 gettext("invalid activity '%s'\n"), tok);
11346 usage(B_FALSE);
11347 found:;
11348 }
11349 break;
11350 case '?':
11351 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
11352 optopt);
11353 usage(B_FALSE);
11354 }
11355 }
11356
11357 argc -= optind;
11358 argv += optind;
11359
11360 get_interval_count(&argc, argv, &wd.wd_interval, &count);
11361 if (count != 0) {
11362 /* This subcmd only accepts an interval, not a count */
11363 (void) fprintf(stderr, gettext("too many arguments\n"));
11364 usage(B_FALSE);
11365 }
11366
11367 if (wd.wd_interval != 0)
11368 verbose = B_TRUE;
11369
11370 if (argc < 1) {
11371 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
11372 usage(B_FALSE);
11373 }
11374 if (argc > 1) {
11375 (void) fprintf(stderr, gettext("too many arguments\n"));
11376 usage(B_FALSE);
11377 }
11378
11379 wd.wd_poolname = argv[0];
11380
11381 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
11382 return (1);
11383
11384 if (verbose) {
11385 /*
11386 * We use a separate thread for printing status updates because
11387 * the main thread will call lzc_wait(), which blocks as long
11388 * as an activity is in progress, which can be a long time.
11389 */
11390 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
11391 != 0) {
11392 (void) fprintf(stderr, gettext("failed to create status"
11393 "thread: %s\n"), strerror(errno));
11394 zpool_close(zhp);
11395 return (1);
11396 }
11397 }
11398
11399 /*
11400 * Loop over all activities that we are supposed to wait for until none
11401 * of them are in progress. Note that this means we can end up waiting
11402 * for more activities to complete than just those that were in progress
11403 * when we began waiting; if an activity we are interested in begins
11404 * while we are waiting for another activity, we will wait for both to
11405 * complete before exiting.
11406 */
11407 for (;;) {
11408 boolean_t missing = B_FALSE;
11409 boolean_t any_waited = B_FALSE;
11410
11411 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
11412 boolean_t waited;
11413
11414 if (!wd.wd_enabled[i])
11415 continue;
11416
11417 error = zpool_wait_status(zhp, i, &missing, &waited);
11418 if (error != 0 || missing)
11419 break;
11420
11421 any_waited = (any_waited || waited);
11422 }
11423
11424 if (error != 0 || missing || !any_waited)
11425 break;
11426 }
11427
11428 zpool_close(zhp);
11429
11430 if (verbose) {
11431 uintptr_t status;
11432 pthread_mutex_lock(&wd.wd_mutex);
11433 wd.wd_should_exit = B_TRUE;
11434 pthread_cond_signal(&wd.wd_cv);
11435 pthread_mutex_unlock(&wd.wd_mutex);
11436 (void) pthread_join(status_thr, (void *)&status);
11437 if (status != 0)
11438 error = status;
11439 }
11440
11441 pthread_mutex_destroy(&wd.wd_mutex);
11442 pthread_cond_destroy(&wd.wd_cv);
11443 return (error);
11444 }
11445
11446 static int
11447 find_command_idx(const char *command, int *idx)
11448 {
11449 for (int i = 0; i < NCOMMAND; ++i) {
11450 if (command_table[i].name == NULL)
11451 continue;
11452
11453 if (strcmp(command, command_table[i].name) == 0) {
11454 *idx = i;
11455 return (0);
11456 }
11457 }
11458 return (1);
11459 }
11460
11461 /*
11462 * Display version message
11463 */
11464 static int
11465 zpool_do_version(int argc, char **argv)
11466 {
11467 (void) argc, (void) argv;
11468 return (zfs_version_print() != 0);
11469 }
11470
11471 /* Display documentation */
11472 static int
11473 zpool_do_help(int argc, char **argv)
11474 {
11475 char page[MAXNAMELEN];
11476 if (argc < 3 || strcmp(argv[2], "zpool") == 0)
11477 strcpy(page, "zpool");
11478 else if (strcmp(argv[2], "concepts") == 0 ||
11479 strcmp(argv[2], "props") == 0)
11480 snprintf(page, sizeof (page), "zpool%s", argv[2]);
11481 else
11482 snprintf(page, sizeof (page), "zpool-%s", argv[2]);
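/*
 * For example, "zpool help" opens zpool(8), "zpool help props" opens
 * zpoolprops(7), and "zpool help status" opens zpool-status(8), assuming
 * the corresponding manual pages are installed.
 */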
11483
11484 execlp("man", "man", page, NULL);
11485
11486 fprintf(stderr, "couldn't run man program: %s", strerror(errno));
11487 return (-1);
11488 }
11489
11490 /*
11491 * Do zpool_load_compat() and print error message on failure
11492 */
11493 static zpool_compat_status_t
11494 zpool_do_load_compat(const char *compat, boolean_t *list)
11495 {
11496 char report[1024];
11497
11498 zpool_compat_status_t ret;
11499
11500 ret = zpool_load_compat(compat, list, report, 1024);
11501 switch (ret) {
11502
11503 case ZPOOL_COMPATIBILITY_OK:
11504 break;
11505
11506 case ZPOOL_COMPATIBILITY_NOFILES:
11507 case ZPOOL_COMPATIBILITY_BADFILE:
11508 case ZPOOL_COMPATIBILITY_BADTOKEN:
11509 (void) fprintf(stderr, "Error: %s\n", report);
11510 break;
11511
11512 case ZPOOL_COMPATIBILITY_WARNTOKEN:
11513 (void) fprintf(stderr, "Warning: %s\n", report);
11514 ret = ZPOOL_COMPATIBILITY_OK;
11515 break;
11516 }
11517 return (ret);
11518 }
11519
11520 int
11521 main(int argc, char **argv)
11522 {
11523 int ret = 0;
11524 int i = 0;
11525 char *cmdname;
11526 char **newargv;
11527
11528 (void) setlocale(LC_ALL, "");
11529 (void) setlocale(LC_NUMERIC, "C");
11530 (void) textdomain(TEXT_DOMAIN);
11531 srand(time(NULL));
11532
11533 opterr = 0;
11534
11535 /*
11536 * Make sure the user has specified some command.
11537 */
11538 if (argc < 2) {
11539 (void) fprintf(stderr, gettext("missing command\n"));
11540 usage(B_FALSE);
11541 }
11542
11543 cmdname = argv[1];
11544
11545 /*
11546 * Special case '-?'
11547 */
11548 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
11549 usage(B_TRUE);
11550
11551 /*
11552 * Special case '-V|--version'
11553 */
11554 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
11555 return (zpool_do_version(argc, argv));
11556
11557 /*
11558 * Special case 'help'
11559 */
11560 if (strcmp(cmdname, "help") == 0)
11561 return (zpool_do_help(argc, argv));
11562
11563 if ((g_zfs = libzfs_init()) == NULL) {
11564 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
11565 return (1);
11566 }
11567
11568 libzfs_print_on_error(g_zfs, B_TRUE);
11569
11570 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
11571
11572 /*
11573 * Many commands modify input strings for string parsing reasons.
11574 * We create a copy to protect the original argv.
11575 */
11576 newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
11577 for (i = 0; i < argc; i++)
11578 newargv[i] = strdup(argv[i]);
11579 newargv[argc] = NULL;
11580
11581 /*
11582 * Run the appropriate command.
11583 */
11584 if (find_command_idx(cmdname, &i) == 0) {
11585 current_command = &command_table[i];
11586 ret = command_table[i].func(argc - 1, newargv + 1);
11587 } else if (strchr(cmdname, '=')) {
11588 verify(find_command_idx("set", &i) == 0);
11589 current_command = &command_table[i];
11590 ret = command_table[i].func(argc, newargv);
11591 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
11592 /*
11593 * 'freeze' is a vile debugging abomination, so we treat
11594 * it as such.
11595 */
11596 zfs_cmd_t zc = {"\0"};
11597
11598 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
11599 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
11600 if (ret != 0) {
11601 (void) fprintf(stderr,
11602 gettext("failed to freeze pool: %d\n"), errno);
11603 ret = 1;
11604 }
11605
11606 log_history = 0;
11607 } else {
11608 (void) fprintf(stderr, gettext("unrecognized "
11609 "command '%s'\n"), cmdname);
11610 usage(B_FALSE);
11611 ret = 1;
11612 }
11613
11614 for (i = 0; i < argc; i++)
11615 free(newargv[i]);
11616 free(newargv);
11617
11618 if (ret == 0 && log_history)
11619 (void) zpool_log_history(g_zfs, history_str);
11620
11621 libzfs_fini(g_zfs);
11622
11623 /*
11624 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
11625 * for the purposes of running ::findleaks.
11626 */
11627 if (getenv("ZFS_ABORT") != NULL) {
11628 (void) printf("dumping core by request\n");
11629 abort();
11630 }
11631
11632 return (ret);
11633 }