]> git.proxmox.com Git - mirror_zfs.git/blob - lib/libzfs/libzfs_status.c
d5470f41238efc2189fc4608b89de44f6f89bbff
[mirror_zfs.git] / lib / libzfs / libzfs_status.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
26 */
27
28 /*
29 * This file contains the functions which analyze the status of a pool. This
 * includes both the status of an active pool, as well as the status of exported
31 * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
32 * the pool. This status is independent (to a certain degree) from the state of
33 * the pool. A pool's state describes only whether or not it is capable of
34 * providing the necessary fault tolerance for data. The status describes the
35 * overall status of devices. A pool that is online can still have a device
36 * that is experiencing errors.
37 *
38 * Only a subset of the possible faults can be detected using 'zpool status',
39 * and not all possible errors correspond to a FMA message ID. The explanation
40 * is left up to the caller, depending on whether it is a live pool or an
41 * import.
42 */
43
44 #include <libzfs.h>
45 #include <string.h>
46 #include <unistd.h>
47 #include "libzfs_impl.h"
48 #include "zfeature_common.h"
49
50 /*
51 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
52 * in libzfs.h. Note that there are some status results which go past the end
53 * of this table, and hence have no associated message ID.
54 */
55 static char *zfs_msgid_table[] = {
56 "ZFS-8000-14",
57 "ZFS-8000-2Q",
58 "ZFS-8000-3C",
59 "ZFS-8000-4J",
60 "ZFS-8000-5E",
61 "ZFS-8000-6X",
62 "ZFS-8000-72",
63 "ZFS-8000-8A",
64 "ZFS-8000-9P",
65 "ZFS-8000-A5",
66 "ZFS-8000-EY",
67 "ZFS-8000-HC",
68 "ZFS-8000-JQ",
69 "ZFS-8000-K4",
70 "ZFS-8000-ER",
71 };
72
73 #define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
74
75 /* ARGSUSED */
76 static int
77 vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
78 {
79 return (state == VDEV_STATE_CANT_OPEN &&
80 aux == VDEV_AUX_OPEN_FAILED);
81 }
82
83 /* ARGSUSED */
84 static int
85 vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
86 {
87 return (state == VDEV_STATE_FAULTED);
88 }
89
90 /* ARGSUSED */
91 static int
92 vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
93 {
94 return (state == VDEV_STATE_DEGRADED || errs != 0);
95 }
96
97 /* ARGSUSED */
98 static int
99 vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
100 {
101 return (state == VDEV_STATE_CANT_OPEN);
102 }
103
104 /* ARGSUSED */
105 static int
106 vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
107 {
108 return (state == VDEV_STATE_OFFLINE);
109 }
110
111 /* ARGSUSED */
112 static int
113 vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
114 {
115 return (state == VDEV_STATE_REMOVED);
116 }
117
118 /*
119 * Detect if any leaf devices that have seen errors or could not be opened.
120 */
121 static boolean_t
122 find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
123 {
124 nvlist_t **child;
125 vdev_stat_t *vs;
126 uint_t c, children;
127 char *type;
128
129 /*
130 * Ignore problems within a 'replacing' vdev, since we're presumably in
131 * the process of repairing any such errors, and don't want to call them
132 * out again. We'll pick up the fact that a resilver is happening
133 * later.
134 */
135 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
136 if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
137 return (B_FALSE);
138
139 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
140 &children) == 0) {
141 for (c = 0; c < children; c++)
142 if (find_vdev_problem(child[c], func))
143 return (B_TRUE);
144 } else {
145 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
146 (uint64_t **)&vs, &c) == 0);
147
148 if (func(vs->vs_state, vs->vs_aux,
149 vs->vs_read_errors +
150 vs->vs_write_errors +
151 vs->vs_checksum_errors))
152 return (B_TRUE);
153 }
154
155 /*
156 * Check any L2 cache devs
157 */
158 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
159 &children) == 0) {
160 for (c = 0; c < children; c++)
161 if (find_vdev_problem(child[c], func))
162 return (B_TRUE);
163 }
164
165 return (B_FALSE);
166 }
167
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find. In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 *
 * 'config' is the pool configuration nvlist.  'isimport' indicates the config
 * describes an exported pool being considered for import; checks that only
 * apply to an active pool (persistent error counts, failing devices) are
 * skipped in that case.  '*erratap' is written only when ZPOOL_STATUS_ERRATA
 * is returned.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	uint64_t errata = 0;
	unsigned long system_hostid = get_system_hostid();

	/*
	 * These entries are required in every config; their absence would be
	 * a programming error, hence verify().
	 */
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.  Only meaningful while the
	 * pool is marked active; a mismatched hostid on an exported pool is
	 * harmless.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).  Distinguish pools that could still be
	 * imported read-only from those that cannot be imported at all.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.  The root vdev being
	 * unopenable means the damage could not be tolerated.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.  Only checked for an active pool.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.  Same predicates as above,
	 * but the root vdev opened, so redundancy absorbed the damage.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features.  For an import the enabled
	 * feature list lives under the load info; for an active pool it is
	 * in the feature stats.
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			feat = fnvlist_lookup_nvlist(feat,
			    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		/* Any known feature absent from the list is "disabled". */
		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	/*
	 * Informational errata available.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
	if (errata) {
		*erratap = errata;
		return (ZPOOL_STATUS_ERRATA);
	}

	return (ZPOOL_STATUS_OK);
}
372
373 zpool_status_t
374 zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
375 {
376 zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);
377
378 if (ret >= NMSGID)
379 *msgid = NULL;
380 else
381 *msgid = zfs_msgid_table[ret];
382
383 return (ret);
384 }
385
386 zpool_status_t
387 zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
388 {
389 zpool_status_t ret = check_status(config, B_TRUE, errata);
390
391 if (ret >= NMSGID)
392 *msgid = NULL;
393 else
394 *msgid = zfs_msgid_table[ret];
395
396 return (ret);
397 }
398
399 static void
400 dump_ddt_stat(const ddt_stat_t *dds, int h)
401 {
402 char refcnt[6];
403 char blocks[6], lsize[6], psize[6], dsize[6];
404 char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
405
406 if (dds == NULL || dds->dds_blocks == 0)
407 return;
408
409 if (h == -1)
410 (void) strcpy(refcnt, "Total");
411 else
412 zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
413
414 zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
415 zfs_nicebytes(dds->dds_lsize, lsize, sizeof (lsize));
416 zfs_nicebytes(dds->dds_psize, psize, sizeof (psize));
417 zfs_nicebytes(dds->dds_dsize, dsize, sizeof (dsize));
418 zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
419 zfs_nicebytes(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
420 zfs_nicebytes(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
421 zfs_nicebytes(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
422
423 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
424 refcnt,
425 blocks, lsize, psize, dsize,
426 ref_blocks, ref_lsize, ref_psize, ref_dsize);
427 }
428
/*
 * Print the DDT histogram and the column totals.
 *
 * 'ddh' supplies one row per power-of-two refcount bucket (0..63);
 * 'dds_total' is printed last as the "Total" row.  Output goes to stdout.
 *
 * NOTE(review): the literal header strings below are copied from SOURCE
 * as-is; runs of blanks may have been collapsed during extraction — verify
 * the exact spacing against upstream before relying on column alignment.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("\n");

	/* Two-line column-group header: "allocated" vs. "referenced". */
	(void) printf("bucket "
	    " allocated "
	    " referenced \n");
	(void) printf("______ "
	    "______________________________ "
	    "______________________________\n");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	/* One row per refcount bucket (labeled 2^h), then the totals row. */
	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);

	(void) printf("\n");
}