lib/libzfs/libzfs_status.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool. This
 * includes both the status of an active pool and the status of exported
 * pools. The analysis returns one of the ZPOOL_STATUS_* defines describing
 * the status of the pool. This status is independent (to a certain degree) of
 * the state of the pool. A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data. The status
 * describes the overall status of devices. A pool that is online can still
 * have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to an FMA message ID. The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */

#include <libzfs.h>
#include <libzutil.h>
#include <string.h>
#include <unistd.h>
#include <sys/systeminfo.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h. Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
        "ZFS-8000-14",
        "ZFS-8000-2Q",
        "ZFS-8000-3C",
        "ZFS-8000-4J",
        "ZFS-8000-5E",
        "ZFS-8000-6X",
        "ZFS-8000-72",
        "ZFS-8000-8A",
        "ZFS-8000-9P",
        "ZFS-8000-A5",
        "ZFS-8000-EY",
        "ZFS-8000-EY",
        "ZFS-8000-EY",
        "ZFS-8000-HC",
        "ZFS-8000-JQ",
        "ZFS-8000-K4",
        "ZFS-8000-ER",
};

#define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))

/*
 * Status predicates applied to each leaf vdev by find_vdev_problem() below.
 * Each predicate is passed the vdev state, the vdev aux state, and the sum of
 * the device's read, write, and checksum error counts.
 */

/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_CANT_OPEN &&
            aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
        return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect if any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
        nvlist_t **child;
        vdev_stat_t *vs;
        uint_t c, children;
        char *type;

        /*
         * Ignore problems within a 'replacing' vdev, since we're presumably
         * in the process of repairing any such errors, and don't want to call
         * them out again. We'll pick up the fact that a resilver is happening
         * later.
         */
        verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
        if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
                return (B_FALSE);

        if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
            &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_vdev_problem(child[c], func))
                                return (B_TRUE);
        } else {
                verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
                    (uint64_t **)&vs, &c) == 0);

                if (func(vs->vs_state, vs->vs_aux,
                    vs->vs_read_errors +
                    vs->vs_write_errors +
                    vs->vs_checksum_errors))
                        return (B_TRUE);
        }

        /*
         * Check any L2 cache devs
         */
        if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
            &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_vdev_problem(child[c], func))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find. In order of importance, we do the
 * following:
 *
 *      - Check for a complete and valid configuration
 *      - Look for any faulted or missing devices in a non-replicated config
 *      - Check for any data errors
 *      - Check for any faulted or missing devices in a replicated config
 *      - Look for any devices showing errors
 *      - Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
{
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        pool_scan_stat_t *ps = NULL;
        uint_t vsc, psc;
        uint64_t nerr;
        uint64_t version;
        uint64_t stateval;
        uint64_t suspended;
        uint64_t hostid = 0;
        uint64_t errata = 0;
        unsigned long system_hostid = get_system_hostid();

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
            &version) == 0);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
            (uint64_t **)&vs, &vsc) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);

        /*
         * Currently resilvering a vdev
         */
        (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
            (uint64_t **)&ps, &psc);
        if (ps != NULL && ps->pss_func == POOL_SCAN_RESILVER &&
            ps->pss_state == DSS_SCANNING)
                return (ZPOOL_STATUS_RESILVERING);

        /*
         * The multihost property is set and the pool may be active.
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_ACTIVE) {
                mmp_state_t mmp_state;
                nvlist_t *nvinfo;

                nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
                mmp_state = fnvlist_lookup_uint64(nvinfo,
                    ZPOOL_CONFIG_MMP_STATE);

                if (mmp_state == MMP_STATE_ACTIVE)
                        return (ZPOOL_STATUS_HOSTID_ACTIVE);
                else if (mmp_state == MMP_STATE_NO_HOSTID)
                        return (ZPOOL_STATUS_HOSTID_REQUIRED);
                else
                        return (ZPOOL_STATUS_HOSTID_MISMATCH);
        }

        /*
         * Pool last accessed by another system.
         */
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
        if (hostid != 0 && (unsigned long)hostid != system_hostid &&
            stateval == POOL_STATE_ACTIVE)
                return (ZPOOL_STATUS_HOSTID_MISMATCH);

        /*
         * Newer on-disk version.
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_VERSION_NEWER)
                return (ZPOOL_STATUS_VERSION_NEWER);

        /*
         * Unsupported feature(s).
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
                nvlist_t *nvinfo;

                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
                    &nvinfo) == 0);
                if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
                        return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
                return (ZPOOL_STATUS_UNSUP_FEAT_READ);
        }

        /*
         * Check that the config is complete.
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
                return (ZPOOL_STATUS_BAD_GUID_SUM);

        /*
         * Check whether the pool has suspended.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
            &suspended) == 0) {
                uint64_t reason;

                if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED_REASON,
                    &reason) == 0 && reason == ZIO_SUSPEND_MMP)
                        return (ZPOOL_STATUS_IO_FAILURE_MMP);

                if (suspended == ZIO_FAILURE_MODE_CONTINUE)
                        return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
                return (ZPOOL_STATUS_IO_FAILURE_WAIT);
        }

        /*
         * Could not read a log.
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_BAD_LOG) {
                return (ZPOOL_STATUS_BAD_LOG);
        }

        /*
         * Bad devices in non-replicated config.
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_faulted))
                return (ZPOOL_STATUS_FAULTED_DEV_NR);

        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_missing))
                return (ZPOOL_STATUS_MISSING_DEV_NR);

        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_broken))
                return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

        /*
         * Corrupted pool metadata
         */
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
                return (ZPOOL_STATUS_CORRUPT_POOL);

        /*
         * Persistent data errors.
         */
        if (!isimport) {
                if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
                    &nerr) == 0 && nerr != 0)
                        return (ZPOOL_STATUS_CORRUPT_DATA);
        }

        /*
         * Missing devices in a replicated config.
         */
        if (find_vdev_problem(nvroot, vdev_faulted))
                return (ZPOOL_STATUS_FAULTED_DEV_R);
        if (find_vdev_problem(nvroot, vdev_missing))
                return (ZPOOL_STATUS_MISSING_DEV_R);
        if (find_vdev_problem(nvroot, vdev_broken))
                return (ZPOOL_STATUS_CORRUPT_LABEL_R);

        /*
         * Devices with errors
         */
        if (!isimport && find_vdev_problem(nvroot, vdev_errors))
                return (ZPOOL_STATUS_FAILING_DEV);

        /*
         * Offlined devices
         */
        if (find_vdev_problem(nvroot, vdev_offlined))
                return (ZPOOL_STATUS_OFFLINE_DEV);

        /*
         * Removed device
         */
        if (find_vdev_problem(nvroot, vdev_removed))
                return (ZPOOL_STATUS_REMOVED_DEV);

        /*
         * Informational errata available.
         */
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
        if (errata) {
                *erratap = errata;
                return (ZPOOL_STATUS_ERRATA);
        }

        /*
         * Outdated, but usable, version
         */
        if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
                return (ZPOOL_STATUS_VERSION_OLDER);

        /*
         * Usable pool with disabled features
         */
        if (version >= SPA_VERSION_FEATURES) {
                int i;
                nvlist_t *feat;

                if (isimport) {
                        feat = fnvlist_lookup_nvlist(config,
                            ZPOOL_CONFIG_LOAD_INFO);
                        if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
                                feat = fnvlist_lookup_nvlist(feat,
                                    ZPOOL_CONFIG_ENABLED_FEAT);
                } else {
                        feat = fnvlist_lookup_nvlist(config,
                            ZPOOL_CONFIG_FEATURE_STATS);
                }

                for (i = 0; i < SPA_FEATURES; i++) {
                        zfeature_info_t *fi = &spa_feature_table[i];
                        if (!nvlist_exists(feat, fi->fi_guid))
                                return (ZPOOL_STATUS_FEAT_DISABLED);
                }
        }

        return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
{
        zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);
        if (msgid != NULL) {
                if (ret >= NMSGID)
                        *msgid = NULL;
                else
                        *msgid = zfs_msgid_table[ret];
        }
        return (ret);
}
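
/*
 * Illustrative sketch, not part of this file: one way a libzfs consumer might
 * query the health of an imported pool via zpool_get_status(). The helper
 * name print_pool_status(), the error handling, and the use of <stdio.h> are
 * assumptions made for the example; libzfs_init(), zpool_open(),
 * zpool_get_status(), zpool_close(), and libzfs_fini() are the library's
 * public API.
 */
#include <stdio.h>

static int
print_pool_status(const char *poolname)
{
        libzfs_handle_t *g_zfs;
        zpool_handle_t *zhp;
        zpool_status_t status;
        zpool_errata_t errata = ZPOOL_ERRATA_NONE;
        char *msgid;

        if ((g_zfs = libzfs_init()) == NULL)
                return (1);
        if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
                libzfs_fini(g_zfs);
                return (1);
        }

        status = zpool_get_status(zhp, &msgid, &errata);

        /*
         * Statuses past the end of zfs_msgid_table (such as ZPOOL_STATUS_OK)
         * have no FMA message ID, in which case msgid is set to NULL.
         */
        if (msgid != NULL)
                (void) printf("pool status %d, message ID %s\n", status, msgid);
        else
                (void) printf("pool status %d, no message ID\n", status);

        zpool_close(zhp);
        libzfs_fini(g_zfs);
        return (0);
}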

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
{
        zpool_status_t ret = check_status(config, B_TRUE, errata);

        if (ret >= NMSGID)
                *msgid = NULL;
        else
                *msgid = zfs_msgid_table[ret];

        return (ret);
}
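
/*
 * Illustrative sketch, not part of this file: checking the status of an
 * exported pool with zpool_import_status(). How the configuration nvlist is
 * obtained (typically from a pool search during import) is outside the scope
 * of the sketch; report_import_status() and its output format are
 * assumptions, and the sketch relies on the same headers as the example
 * above.
 */
static void
report_import_status(nvlist_t *config)
{
        zpool_status_t status;
        zpool_errata_t errata = ZPOOL_ERRATA_NONE;
        char *msgid;

        status = zpool_import_status(config, &msgid, &errata);

        if (status == ZPOOL_STATUS_OK)
                (void) printf("pool is healthy and can be imported\n");
        else if (msgid != NULL)
                (void) printf("import status %d, message ID %s\n", status,
                    msgid);
        else
                (void) printf("import status %d, no message ID\n", status);
}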