4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
29 * This file contains the functions which analyze the status of a pool. This
30 * includes both the status of an active pool, as well as the status of
31 * exported pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
32 * the pool. This status is independent (to a certain degree) from the state of
33 * the pool. A pool's state describes only whether or not it is capable of
34 * providing the necessary fault tolerance for data. The status describes the
35 * overall status of devices. A pool that is online can still have a device
36 * that is experiencing errors.
38 * Only a subset of the possible faults can be detected using 'zpool status',
39 * and not all possible errors correspond to a FMA message ID. The explanation
40 * is left up to the caller, depending on whether it is a live pool or an import.
47 #include "libzfs_impl.h"
48 #include "zfeature_common.h"
51 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
52 * in libzfs.h. Note that there are some status results which go past the end
53 * of this table, and hence have no associated message ID.
55 static char *zfs_msgid_table
[] = {
75 #define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
79 vdev_missing(uint64_t state
, uint64_t aux
, uint64_t errs
)
81 return (state
== VDEV_STATE_CANT_OPEN
&&
82 aux
== VDEV_AUX_OPEN_FAILED
);
87 vdev_faulted(uint64_t state
, uint64_t aux
, uint64_t errs
)
89 return (state
== VDEV_STATE_FAULTED
);
94 vdev_errors(uint64_t state
, uint64_t aux
, uint64_t errs
)
96 return (state
== VDEV_STATE_DEGRADED
|| errs
!= 0);
101 vdev_broken(uint64_t state
, uint64_t aux
, uint64_t errs
)
103 return (state
== VDEV_STATE_CANT_OPEN
);
108 vdev_offlined(uint64_t state
, uint64_t aux
, uint64_t errs
)
110 return (state
== VDEV_STATE_OFFLINE
);
115 vdev_removed(uint64_t state
, uint64_t aux
, uint64_t errs
)
117 return (state
== VDEV_STATE_REMOVED
);
121 * Detect if any leaf devices have seen errors or could not be opened.
/*
 * find_vdev_problem -- recursively walk a vdev config tree, applying
 * 'func(state, aux, error-sum)' at each leaf; a nonzero result from the
 * recursive call (visible below) indicates a matching leaf was found.
 * L2ARC cache devices attached to this vdev are checked as well.
 * NOTE(review): extraction dropped several lines here (local declarations,
 * return statements, braces); the remaining code is kept byte-identical.
 */
124 find_vdev_problem(nvlist_t
*vdev
, int (*func
)(uint64_t, uint64_t, uint64_t))
132 * Ignore problems within a 'replacing' vdev, since we're presumably in
133 * the process of repairing any such errors, and don't want to call them
134 * out again. We'll pick up the fact that a resilver is happening
137 verify(nvlist_lookup_string(vdev
, ZPOOL_CONFIG_TYPE
, &type
) == 0);
138 if (strcmp(type
, VDEV_TYPE_REPLACING
) == 0)
/* Interior vdevs: recurse into each child; leaves carry the stats. */
141 if (nvlist_lookup_nvlist_array(vdev
, ZPOOL_CONFIG_CHILDREN
, &child
,
143 for (c
= 0; c
< children
; c
++)
144 if (find_vdev_problem(child
[c
], func
))
147 verify(nvlist_lookup_uint64_array(vdev
, ZPOOL_CONFIG_VDEV_STATS
,
148 (uint64_t **)&vs
, &c
) == 0);
/*
 * NOTE(review): the error sum below looks truncated by extraction --
 * presumably vs_read_errors was also summed here; confirm upstream.
 */
150 if (func(vs
->vs_state
, vs
->vs_aux
,
152 vs
->vs_write_errors
+
153 vs
->vs_checksum_errors
))
158 * Check any L2 cache devs
160 if (nvlist_lookup_nvlist_array(vdev
, ZPOOL_CONFIG_L2CACHE
, &child
,
162 for (c
= 0; c
< children
; c
++)
163 if (find_vdev_problem(child
[c
], func
))
171 * Active pool health status.
173 * To determine the status for a pool, we make several passes over the config,
174 * picking the most egregious error we find. In order of importance, we do the
177 * - Check for a complete and valid configuration
178 * - Look for any faulted or missing devices in a non-replicated config
179 * - Check for any data errors
180 * - Check for any faulted or missing devices in a replicated config
181 * - Look for any devices showing errors
182 * - Check for any resilvering devices
184 * There can obviously be multiple errors within a single pool, so this routine
185 * only picks the most damaging of all the current errors to report.
/*
 * check_status -- classify the overall health of a pool.
 *
 * Scans 'config' in decreasing order of severity and returns the first
 * matching ZPOOL_STATUS_* value, or ZPOOL_STATUS_OK if nothing matched.
 * 'isimport' is B_TRUE when evaluating an exported pool being considered
 * for import (per-device error counts are only consulted for live pools;
 * see the !isimport guard below).  'erratap' presumably receives the
 * errata code read near the end -- the assignment itself is not visible
 * in this extract; confirm upstream.
 * NOTE(review): extraction dropped many lines (declarations, braces,
 * lookup-call tails); the remaining code is kept byte-identical.
 */
187 static zpool_status_t
188 check_status(nvlist_t
*config
, boolean_t isimport
, zpool_errata_t
*erratap
)
/* Scan stats may legitimately be absent; ps stays NULL in that case. */
192 pool_scan_stat_t
*ps
= NULL
;
200 unsigned long system_hostid
= get_system_hostid();
202 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_VERSION
,
204 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
206 verify(nvlist_lookup_uint64_array(nvroot
, ZPOOL_CONFIG_VDEV_STATS
,
207 (uint64_t **)&vs
, &vsc
) == 0);
208 verify(nvlist_lookup_uint64(config
, ZPOOL_CONFIG_POOL_STATE
,
212 * Currently resilvering a vdev
214 (void) nvlist_lookup_uint64_array(nvroot
, ZPOOL_CONFIG_SCAN_STATS
,
215 (uint64_t **)&ps
, &psc
);
216 if (ps
&& ps
->pss_func
== POOL_SCAN_RESILVER
&&
217 ps
->pss_state
== DSS_SCANNING
)
218 return (ZPOOL_STATUS_RESILVERING
);
221 * The multihost property is set and the pool may be active.
223 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
224 vs
->vs_aux
== VDEV_AUX_ACTIVE
) {
225 mmp_state_t mmp_state
;
228 nvinfo
= fnvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
);
229 mmp_state
= fnvlist_lookup_uint64(nvinfo
,
230 ZPOOL_CONFIG_MMP_STATE
);
232 if (mmp_state
== MMP_STATE_ACTIVE
)
233 return (ZPOOL_STATUS_HOSTID_ACTIVE
);
234 else if (mmp_state
== MMP_STATE_NO_HOSTID
)
235 return (ZPOOL_STATUS_HOSTID_REQUIRED
);
237 return (ZPOOL_STATUS_HOSTID_MISMATCH
);
241 * Pool last accessed by another system.
243 (void) nvlist_lookup_uint64(config
, ZPOOL_CONFIG_HOSTID
, &hostid
);
244 if (hostid
!= 0 && (unsigned long)hostid
!= system_hostid
&&
245 stateval
== POOL_STATE_ACTIVE
)
246 return (ZPOOL_STATUS_HOSTID_MISMATCH
);
249 * Newer on-disk version.
251 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
252 vs
->vs_aux
== VDEV_AUX_VERSION_NEWER
)
253 return (ZPOOL_STATUS_VERSION_NEWER
);
256 * Unsupported feature(s).
258 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
259 vs
->vs_aux
== VDEV_AUX_UNSUP_FEAT
) {
262 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_LOAD_INFO
,
264 if (nvlist_exists(nvinfo
, ZPOOL_CONFIG_CAN_RDONLY
))
265 return (ZPOOL_STATUS_UNSUP_FEAT_WRITE
);
266 return (ZPOOL_STATUS_UNSUP_FEAT_READ
);
270 * Check that the config is complete.
272 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
273 vs
->vs_aux
== VDEV_AUX_BAD_GUID_SUM
)
274 return (ZPOOL_STATUS_BAD_GUID_SUM
);
277 * Check whether the pool has suspended.
279 if (nvlist_lookup_uint64(config
, ZPOOL_CONFIG_SUSPENDED
,
/* MMP-triggered suspension is reported distinctly from I/O failure. */
283 if (nvlist_lookup_uint64(config
, ZPOOL_CONFIG_SUSPENDED_REASON
,
284 &reason
) == 0 && reason
== ZIO_SUSPEND_MMP
)
285 return (ZPOOL_STATUS_IO_FAILURE_MMP
);
287 if (suspended
== ZIO_FAILURE_MODE_CONTINUE
)
288 return (ZPOOL_STATUS_IO_FAILURE_CONTINUE
);
289 return (ZPOOL_STATUS_IO_FAILURE_WAIT
);
293 * Could not read a log.
295 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
296 vs
->vs_aux
== VDEV_AUX_BAD_LOG
) {
297 return (ZPOOL_STATUS_BAD_LOG
);
301 * Bad devices in non-replicated config.
303 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
304 find_vdev_problem(nvroot
, vdev_faulted
))
305 return (ZPOOL_STATUS_FAULTED_DEV_NR
);
307 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
308 find_vdev_problem(nvroot
, vdev_missing
))
309 return (ZPOOL_STATUS_MISSING_DEV_NR
);
311 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
312 find_vdev_problem(nvroot
, vdev_broken
))
313 return (ZPOOL_STATUS_CORRUPT_LABEL_NR
);
316 * Corrupted pool metadata
318 if (vs
->vs_state
== VDEV_STATE_CANT_OPEN
&&
319 vs
->vs_aux
== VDEV_AUX_CORRUPT_DATA
)
320 return (ZPOOL_STATUS_CORRUPT_POOL
);
323 * Persistent data errors.
326 if (nvlist_lookup_uint64(config
, ZPOOL_CONFIG_ERRCOUNT
,
327 &nerr
) == 0 && nerr
!= 0)
328 return (ZPOOL_STATUS_CORRUPT_DATA
);
332 * Missing devices in a replicated config.
334 if (find_vdev_problem(nvroot
, vdev_faulted
))
335 return (ZPOOL_STATUS_FAULTED_DEV_R
);
336 if (find_vdev_problem(nvroot
, vdev_missing
))
337 return (ZPOOL_STATUS_MISSING_DEV_R
);
338 if (find_vdev_problem(nvroot
, vdev_broken
))
339 return (ZPOOL_STATUS_CORRUPT_LABEL_R
);
342 * Devices with errors
344 if (!isimport
&& find_vdev_problem(nvroot
, vdev_errors
))
345 return (ZPOOL_STATUS_FAILING_DEV
);
350 if (find_vdev_problem(nvroot
, vdev_offlined
))
351 return (ZPOOL_STATUS_OFFLINE_DEV
);
356 if (find_vdev_problem(nvroot
, vdev_removed
))
357 return (ZPOOL_STATUS_REMOVED_DEV
);
360 * Outdated, but usable, version
362 if (SPA_VERSION_IS_SUPPORTED(version
) && version
!= SPA_VERSION
)
363 return (ZPOOL_STATUS_VERSION_OLDER
);
366 * Usable pool with disabled features
368 if (version
>= SPA_VERSION_FEATURES
) {
/* For imports, enabled features come from the load info nvlist. */
373 feat
= fnvlist_lookup_nvlist(config
,
374 ZPOOL_CONFIG_LOAD_INFO
);
375 if (nvlist_exists(feat
, ZPOOL_CONFIG_ENABLED_FEAT
))
376 feat
= fnvlist_lookup_nvlist(feat
,
377 ZPOOL_CONFIG_ENABLED_FEAT
);
379 feat
= fnvlist_lookup_nvlist(config
,
380 ZPOOL_CONFIG_FEATURE_STATS
);
/* Any known feature missing from the pool's set => FEAT_DISABLED. */
383 for (i
= 0; i
< SPA_FEATURES
; i
++) {
384 zfeature_info_t
*fi
= &spa_feature_table
[i
];
385 if (!nvlist_exists(feat
, fi
->fi_guid
))
386 return (ZPOOL_STATUS_FEAT_DISABLED
);
391 * Informational errata available.
393 (void) nvlist_lookup_uint64(config
, ZPOOL_CONFIG_ERRATA
, &errata
);
396 return (ZPOOL_STATUS_ERRATA
);
399 return (ZPOOL_STATUS_OK
);
/*
 * zpool_get_status -- public wrapper: status of an open (live) pool.
 * Runs check_status() on the handle's cached config with isimport ==
 * B_FALSE and looks up the FMA message ID string for the result in
 * zfs_msgid_table.
 * NOTE(review): the bounds check against NMSGID (statuses past the end
 * of the table should yield a NULL *msgid) is not visible in this
 * extract -- confirm it survives upstream.
 */
403 zpool_get_status(zpool_handle_t
*zhp
, char **msgid
, zpool_errata_t
*errata
)
405 zpool_status_t ret
= check_status(zhp
->zpool_config
, B_FALSE
, errata
);
410 *msgid
= zfs_msgid_table
[ret
];
/*
 * zpool_import_status -- public wrapper: status of an exported pool
 * being considered for import.  Same as zpool_get_status() but takes a
 * raw config nvlist and passes isimport == B_TRUE, which suppresses the
 * per-device error-count check in check_status().
 * NOTE(review): the NMSGID bounds check on 'ret' is not visible in this
 * extract -- confirm it survives upstream.
 */
416 zpool_import_status(nvlist_t
*config
, char **msgid
, zpool_errata_t
*errata
)
418 zpool_status_t ret
= check_status(config
, B_TRUE
, errata
);
423 *msgid
= zfs_msgid_table
[ret
];
/*
 * dump_ddt_stat -- print one row of the DDT (dedup table) histogram:
 * block count and logical/physical/deduped sizes, plus the referenced
 * totals, all humanized via zfs_nicenum()/zfs_nicebytes() into fixed
 * 6-byte buffers.  h selects the reference-count bucket (row label is
 * 2^h); h == -1 prints the "Total" row.  Rows with no blocks (or a NULL
 * dds) are skipped -- the branch body is not visible here; presumably
 * an early return.
 * NOTE(review): the 'refcnt' buffer declaration and the h<0/h>=0 branch
 * structure were dropped by extraction; code kept byte-identical.
 */
429 dump_ddt_stat(const ddt_stat_t
*dds
, int h
)
432 char blocks
[6], lsize
[6], psize
[6], dsize
[6];
433 char ref_blocks
[6], ref_lsize
[6], ref_psize
[6], ref_dsize
[6];
435 if (dds
== NULL
|| dds
->dds_blocks
== 0)
439 (void) strcpy(refcnt
, "Total");
441 zfs_nicenum(1ULL << h
, refcnt
, sizeof (refcnt
));
443 zfs_nicenum(dds
->dds_blocks
, blocks
, sizeof (blocks
));
444 zfs_nicebytes(dds
->dds_lsize
, lsize
, sizeof (lsize
));
445 zfs_nicebytes(dds
->dds_psize
, psize
, sizeof (psize
));
446 zfs_nicebytes(dds
->dds_dsize
, dsize
, sizeof (dsize
));
447 zfs_nicenum(dds
->dds_ref_blocks
, ref_blocks
, sizeof (ref_blocks
));
448 zfs_nicebytes(dds
->dds_ref_lsize
, ref_lsize
, sizeof (ref_lsize
));
449 zfs_nicebytes(dds
->dds_ref_psize
, ref_psize
, sizeof (ref_psize
));
450 zfs_nicebytes(dds
->dds_ref_dsize
, ref_dsize
, sizeof (ref_dsize
));
452 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
454 blocks
, lsize
, psize
, dsize
,
455 ref_blocks
, ref_lsize
, ref_psize
, ref_dsize
);
459 * Print the DDT histogram and the column totals.
/*
 * zpool_dump_ddt -- print the full DDT histogram: header and separator
 * rows, one data row per power-of-two reference-count bucket
 * (h = 0..63), then the column totals row (h == -1, prints dds_total).
 * NOTE(review): the tail of this function (and some printf continuation
 * lines) fall outside this extract; code kept byte-identical.
 */
462 zpool_dump_ddt(const ddt_stat_t
*dds_total
, const ddt_histogram_t
*ddh
)
468 (void) printf("bucket "
471 (void) printf("______ "
472 "______________________________ "
473 "______________________________\n");
475 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
477 "blocks", "LSIZE", "PSIZE", "DSIZE",
478 "blocks", "LSIZE", "PSIZE", "DSIZE");
480 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
482 "------", "-----", "-----", "-----",
483 "------", "-----", "-----", "-----");
485 for (h
= 0; h
< 64; h
++)
486 dump_ddt_stat(&ddh
->ddh_stat
[h
], h
);
488 dump_ddt_stat(dds_total
, -1);