/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool.  This status is independent (to a certain degree) from
 * the state of the pool.  A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data.  The status
 * describes the overall status of devices.  A pool that is online can still
 * have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */

#include <libzfs.h>
#include <string.h>
#include <unistd.h>

#include "libzfs_impl.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))

static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect whether any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	unsigned long system_hostid = gethostid() & 0xffffffff;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool is suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid)
{
	zpool_status_t ret = check_status(config, B_TRUE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

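/*
 * Illustrative sketch only, not part of libzfs: how a consumer such as a
 * status-reporting command might use zpool_import_status() and the returned
 * message ID.  The function name, the printed text, and the knowledge-article
 * URL prefix below are assumptions made for this example.
 */
static void
example_report_import_status(nvlist_t *config)
{
	char *msgid;
	zpool_status_t status = zpool_import_status(config, &msgid);

	if (status == ZPOOL_STATUS_OK) {
		(void) printf("pool is healthy\n");
	} else if (msgid != NULL) {
		/* Statuses with an FMA message ID point at a knowledge article. */
		(void) printf("status %d, see: http://illumos.org/msg/%s\n",
		    (int)status, msgid);
	} else {
		/* Statuses past the end of zfs_msgid_table have no message ID. */
		(void) printf("status %d (no associated message ID)\n",
		    (int)status);
	}
}
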
static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}

/*
 * Print the DDT histogram and the column totals.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("bucket   "
	    "           allocated             "
	    "          referenced          \n");
	(void) printf("______   "
	    "______________________________   "
	    "______________________________\n");

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);
}
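
/*
 * Illustrative sketch only, not part of this file: how a caller such as
 * 'zpool status -D' might pull the DDT statistics out of a pool config
 * nvlist and hand them to zpool_dump_ddt().  Treat the exact nvlist key
 * names and the silent handling of missing stats as assumptions of this
 * example.
 */
static void
example_print_dedup_stats(nvlist_t *config)
{
	ddt_histogram_t *ddh = NULL;
	ddt_stat_t *dds = NULL;
	uint_t c;

	/* Pools that have never used dedup may carry no DDT statistics. */
	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
	    (uint64_t **)&dds, &c);
	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
	    (uint64_t **)&ddh, &c);

	if (dds == NULL || ddh == NULL)
		return;

	zpool_dump_ddt(dds, ddh);
}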