/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Lawrence Livermore National Security, LLC.
#include <zfs_comutil.h>
#include <libzfs.h>
+#include "zdb.h"
+
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
- (((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ? \
- DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES))
+ (idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
+ DMU_OT_ZAP_OTHER : \
+ (idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
+ DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
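+
+/*
+ * For example, ZDB_OT_TYPE(DMU_OTN_ZAP_DATA) folds to DMU_OT_ZAP_OTHER
+ * and ZDB_OT_TYPE(DMU_OTN_UINT64_DATA) to DMU_OT_UINT64_OTHER, so any
+ * extended type still yields an index no greater than DMU_OT_NUMTYPES.
+ */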
static char *
zdb_ot_name(dmu_object_type_t type)
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
-const char cmdname[] = "zdb";
+static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
-extern void dump_intent_log(zilog_t *);
uint64_t *zopt_object = NULL;
-int zopt_objects = 0;
+static unsigned zopt_objects = 0;
libzfs_handle_t *g_zfs;
uint64_t max_inflight = 1000;
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[<poolname> [<object> ...]]\n"
- "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset> "
- "[<object> ...]\n"
+ "\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset>\n"
+ "\t\t[<object> ...]\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
}
static void
-zdb_nicenum(uint64_t num, char *buf)
+zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
- (void) sprintf(buf, "%llu", (longlong_t)num);
+ (void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
- nicenum(num, buf);
+		nicenum(num, buf, buflen);
}
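+
+/*
+ * Typical usage, as in the callers below:
+ *
+ *	char buf[32];
+ *	CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
+ *	zdb_nicenum(num, buf, sizeof (buf));
+ */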
-const char histo_stars[] = "****************************************";
-const int histo_width = sizeof (histo_stars) - 1;
+static const char histo_stars[] = "****************************************";
+static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
}
/*ARGSUSED*/
-void
+static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
- int i;
+ unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
uint64_t i;
char bytes[32], comp[32], uncomp[32];
+ /* make sure the output won't get truncated */
+ CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
+
if (bpop == NULL)
return;
- zdb_nicenum(bpop->bpo_bytes, bytes);
- zdb_nicenum(bpop->bpo_comp, comp);
- zdb_nicenum(bpop->bpo_uncomp, uncomp);
+ zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
+ zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
+ zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
- int i;
+ unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
zap_cursor_fini(&zc);
}
-int
+static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
- int c;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
return (0);
}
- for (c = 0; c < vd->vdev_children; c++)
+ for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
-int
+static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
- int c, m;
- if (vd->vdev_top == vd && !vd->vdev_removing) {
- for (m = 0; m < vd->vdev_ms_count; m++) {
+ if (vd->vdev_top == vd) {
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
refcount++;
}
}
- for (c = 0; c < vd->vdev_children; c++)
+ for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
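+/*
+ * Count the obsolete space maps attached to top-level indirect vdevs;
+ * these use the space_map_phys_t bonus layout and so are included in
+ * the refcount checked by verify_spacemap_refcounts() below.
+ */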
+static int
+get_obsolete_refcount(vdev_t *vd)
+{
+ int refcount = 0;
+
+ uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
+ if (vd->vdev_top == vd && obsolete_sm_obj != 0) {
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
+ obsolete_sm_obj, &doi));
+ if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
+ refcount++;
+ }
+ } else {
+ ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+ ASSERT3U(obsolete_sm_obj, ==, 0);
+ }
+ for (unsigned c = 0; c < vd->vdev_children; c++) {
+ refcount += get_obsolete_refcount(vd->vdev_child[c]);
+ }
+
+ return (refcount);
+}
+
+static int
+get_prev_obsolete_spacemap_refcount(spa_t *spa)
+{
+ uint64_t prev_obj =
+ spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
+ if (prev_obj != 0) {
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
+ if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
static int
verify_spacemap_refcounts(spa_t *spa)
{
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
+ actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
+ actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
dump_spacemap(objset_t *os, space_map_t *sm)
{
uint64_t alloc, offset, entry;
- char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
- "INVALID", "INVALID", "INVALID", "INVALID" };
+ const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
+ "INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
+ (void) printf("space map object %llu:\n",
+ (longlong_t)sm->sm_phys->smp_object);
+ (void) printf(" smp_objsize = 0x%llx\n",
+ (longlong_t)sm->sm_phys->smp_objsize);
+ (void) printf(" smp_alloc = 0x%llx\n",
+ (longlong_t)sm->sm_phys->smp_alloc);
+
/*
* Print out the freelist entries in both encoded and decoded form.
*/
avl_tree_t *t = &msp->ms_size_tree;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
- zdb_nicenum(metaslab_block_maxsize(msp), maxbuf);
+	/* make sure nicenum has enough space */
+ CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);
+
+ zdb_nicenum(metaslab_block_maxsize(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", avl_numnodes(t), "maxsize", maxbuf,
space_map_t *sm = msp->ms_sm;
char freebuf[32];
- zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf);
+ zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
+ sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
- mutex_enter(&msp->ms_lock);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
- mutex_exit(&msp->ms_lock);
}
}
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
uint64_t fragmentation;
- int c;
metaslab_class_histogram_verify(mc);
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
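+/*
+ * Print an indirect vdev's birth and mapping arrays; with -dddddd or
+ * -mmmm, also dump every mapping entry with its obsolete count and
+ * any obsolete space map.
+ */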
+static void
+print_vdev_indirect(vdev_t *vd)
+{
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+ if (vim == NULL) {
+ ASSERT3P(vib, ==, NULL);
+ return;
+ }
+
+ ASSERT3U(vdev_indirect_mapping_object(vim), ==,
+ vic->vic_mapping_object);
+ ASSERT3U(vdev_indirect_births_object(vib), ==,
+ vic->vic_births_object);
+
+ (void) printf("indirect births obj %llu:\n",
+ (longlong_t)vic->vic_births_object);
+ (void) printf(" vib_count = %llu\n",
+ (longlong_t)vdev_indirect_births_count(vib));
+ for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
+ vdev_indirect_birth_entry_phys_t *cur_vibe =
+ &vib->vib_entries[i];
+ (void) printf("\toffset %llx -> txg %llu\n",
+ (longlong_t)cur_vibe->vibe_offset,
+ (longlong_t)cur_vibe->vibe_phys_birth_txg);
+ }
+ (void) printf("\n");
+
+ (void) printf("indirect mapping obj %llu:\n",
+ (longlong_t)vic->vic_mapping_object);
+ (void) printf(" vim_max_offset = 0x%llx\n",
+ (longlong_t)vdev_indirect_mapping_max_offset(vim));
+ (void) printf(" vim_bytes_mapped = 0x%llx\n",
+ (longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
+ (void) printf(" vim_count = %llu\n",
+ (longlong_t)vdev_indirect_mapping_num_entries(vim));
+
+ if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
+ return;
+
+ uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
+
+ for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[i];
+ (void) printf("\t<%llx:%llx:%llx> -> "
+ "<%llx:%llx:%llx> (%x obsolete)\n",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
+ (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ counts[i]);
+ }
+ (void) printf("\n");
+
+ uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
+ if (obsolete_sm_object != 0) {
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ (void) printf("obsolete space map object %llu:\n",
+ (u_longlong_t)obsolete_sm_object);
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+ ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
+ obsolete_sm_object);
+ dump_spacemap(mos, vd->vdev_obsolete_sm);
+ (void) printf("\n");
+ }
+}
+
static void
dump_metaslabs(spa_t *spa)
{
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
+ print_vdev_indirect(vd);
+
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
- char *types[4] = { "ditto", "single", "double", "triple" };
+ const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
{
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
- enum zio_checksum c;
- enum ddt_type type;
- enum ddt_class class;
- bzero(&ddh_total, sizeof (ddt_histogram_t));
- bzero(&dds_total, sizeof (ddt_stat_t));
+ bzero(&ddh_total, sizeof (ddh_total));
+ bzero(&dds_total, sizeof (dds_total));
- for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+ for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
- for (type = 0; type < DDT_TYPES; type++) {
- for (class = 0; class < DDT_CLASSES;
+ for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+ for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
- char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" };
+ const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
+ "outage" };
char prefix[256];
- int c, t;
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
- for (t = 0; t < DTL_TYPES; t++) {
+ for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
- mutex_enter(rt->rt_lock);
range_tree_walk(rt, dump_dtl_seg, prefix);
- mutex_exit(rt->rt_lock);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
- for (c = 0; c < vd->vdev_children; c++)
+ for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
struct tm t;
char tbuf[30];
char internalstr[MAXPATHLEN];
- int i;
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
} while (len != 0);
(void) printf("\nHistory:\n");
- for (i = 0; i < num; i++) {
+ for (unsigned i = 0; i < num; i++) {
uint64_t time, txg, ievent;
char *cmd, *intstr;
boolean_t printed = B_FALSE;
time_t crtime;
char nice[32];
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);
+
if (dd == NULL)
return;
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
- zdb_nicenum(dd->dd_used_bytes, nice);
+ zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
- zdb_nicenum(dd->dd_compressed_bytes, nice);
+ zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
- zdb_nicenum(dd->dd_uncompressed_bytes, nice);
+ zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
- zdb_nicenum(dd->dd_quota, nice);
+ zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
- zdb_nicenum(dd->dd_reserved, nice);
+ zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(u_longlong_t)dd->dd_flags);
#define DO(which) \
- zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \
+ zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
+ sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);
+
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
- zdb_nicenum(ds->ds_referenced_bytes, used);
- zdb_nicenum(ds->ds_compressed_bytes, compressed);
- zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed);
- zdb_nicenum(ds->ds_unique_bytes, unique);
+ zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
+ zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
+ zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
+ sizeof (uncompressed));
+ zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
}
static void
-dump_bptree(objset_t *os, uint64_t obj, char *name)
+dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
+
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
- zdb_nicenum(bt->bt_bytes, bytes);
+ zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
}
static void
-dump_full_bpobj(bpobj_t *bpo, char *name, int indent)
+dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
+
if (dump_opt['d'] < 3)
return;
- zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes);
+ zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
- zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
- zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
+ zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
+ zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf(" %*s: object %llu, %llu local blkptrs, "
"%llu subobjs in object, %llu, %s (%s/%s comp)\n",
indent * 8, name,
char comp[32];
char uncomp[32];
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
+
if (dump_opt['d'] < 3)
return;
return;
}
- zdb_nicenum(dl->dl_phys->dl_used, bytes);
- zdb_nicenum(dl->dl_phys->dl_comp, comp);
- zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp);
+ zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
+ zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
+ zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
(void) printf("\n Deadlist: %s (%s/%s comp)\n",
bytes, comp, uncomp);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
+ if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
+ uint64_t projid;
+
+ if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
+ sizeof (uint64_t)) == 0)
+ (void) printf("\tprojid %llu\n", (u_longlong_t)projid);
+ }
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
- dump_zap, /* ZFS user/group used */
- dump_zap, /* ZFS user/group quota */
+ dump_zap, /* ZFS user/group/project used */
+ dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
char aux[50];
int error;
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
+
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
- zdb_nicenum(doi.doi_metadata_block_size, iblk);
- zdb_nicenum(doi.doi_data_block_size, dblk);
- zdb_nicenum(doi.doi_max_offset, lsize);
- zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize);
- zdb_nicenum(doi.doi_bonus_size, bonus_size);
- zdb_nicenum(doi.doi_dnodesize, dnsize);
+ zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
+ zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
+ zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
+ zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
+ zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
+ zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
doi.doi_max_offset);
for (;;) {
char segsize[32];
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
- zdb_nicenum(end - start, segsize);
+ zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
dnode_rele(dn, FTAG);
}
-static char *objset_types[DMU_OST_NUMTYPES] = {
+static const char *objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
static void
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
- char *type = "UNKNOWN";
+ const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
int print_header = 1;
- int i, error;
+ unsigned i;
+ int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
+
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
- zdb_nicenum(refdbytes, numbuf);
+ zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
- if (dmu_objset_ds(os) != NULL)
- dump_deadlist(&dmu_objset_ds(os)->ds_deadlist);
+ if (dmu_objset_ds(os) != NULL) {
+ dsl_dataset_t *ds = dmu_objset_ds(os);
+ dump_deadlist(&ds->ds_deadlist);
+
+ if (dsl_dataset_remap_deadlist_exists(ds)) {
+ (void) printf("ds_remap_deadlist:\n");
+ dump_deadlist(&ds->ds_remap_deadlist);
+ }
+ }
if (verbosity < 2)
return;
NULL);
}
+ if (DMU_PROJECTUSED_DNODE(os) != NULL &&
+ DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
+ dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
+ &print_header, NULL);
+
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots);
max_slot_used = object + dnode_slots - 1;
}
- ASSERT3U(object_count, ==, usedobjs);
-
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
+
+ ASSERT3U(object_count, ==, usedobjs);
}
static void
exit(1);
}
- if (ioctl(fd, BLKFLSBUF) != 0)
- (void) printf("failed to invalidate cache '%s' : %s\n", path,
- strerror(errno));
-
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
exit(1);
}
+ if (S_ISBLK(statbuf.st_mode) && ioctl(fd, BLKFLSBUF) != 0)
+ (void) printf("failed to invalidate cache '%s' : %s\n", path,
+ strerror(errno));
+
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
}
static uint64_t dataset_feature_count[SPA_FEATURES];
+static uint64_t remap_deadlist_count = 0;
/*ARGSUSED*/
static int
dataset_feature_count[f]++;
}
+ if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
+ remap_deadlist_count++;
+ }
+
dump_dir(os);
close_objset(os, FTAG);
fuid_table_destroy();
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
-static char *zdb_ot_extname[] = {
+static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
+ uint64_t zcb_removing_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
- uint64_t zcb_lastprint;
+ hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
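+	/* per top-level vdev; entries are set only for indirect vdevs */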
+ uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
static void
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
- int idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
+ unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
abd_free(zio->io_abd);
mutex_enter(&spa->spa_scrub_lock);
- spa->spa_scrub_inflight--;
+ spa->spa_load_verify_ios--;
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
- while (spa->spa_scrub_inflight > max_inflight)
+ while (spa->spa_load_verify_ios > max_inflight)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
- spa->spa_scrub_inflight++;
+ spa->spa_load_verify_ios++;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
int sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
+
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4dMB/s) "
NULL /* alloc */
};
+/* ARGSUSED */
+static void
+claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ /*
+ * This callback was called through a remap from
+ * a device being removed. Therefore, the vdev that
+ * this callback is applied to is a concrete
+ * vdev.
+ */
+ ASSERT(vdev_is_concrete(vd));
+
+ VERIFY0(metaslab_claim_impl(vd, offset, size,
+ spa_first_txg(vd->vdev_spa)));
+}
+
+static void
+claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
+{
+ vdev_t *vd = arg;
+
+ vdev_indirect_ops.vdev_op_remap(vd, offset, size,
+ claim_segment_impl_cb, NULL);
+}
+
+/*
+ * After accounting for all allocated blocks that are directly referenced,
+ * we might have missed a reference to a block from a partially complete
+ * (and thus unused) indirect mapping object. We perform a secondary pass
+ * through the metaslabs we have already mapped and claim the destination
+ * blocks.
+ */
+static void
+zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
+{
+ if (spa->spa_vdev_removal == NULL)
+ return;
+
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_t *vd = svr->svr_vdev;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
+ metaslab_t *msp = vd->vdev_ms[msi];
+
+ if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
+ break;
+
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ if (msp->ms_sm != NULL) {
+ VERIFY0(space_map_load(msp->ms_sm,
+ svr->svr_allocd_segs, SM_ALLOC));
+
+ /*
+ * Clear everything past what has been synced,
+ * because we have not allocated mappings for it yet.
+ */
+ range_tree_clear(svr->svr_allocd_segs,
+ vdev_indirect_mapping_max_offset(vim),
+ msp->ms_sm->sm_start + msp->ms_sm->sm_size -
+ vdev_indirect_mapping_max_offset(vim));
+ }
+
+ zcb->zcb_removing_size +=
+ range_tree_space(svr->svr_allocd_segs);
+ range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+}
+
+/*
+ * vim_idxp is an in-out parameter which (for indirect vdevs) is the
+ * index in vim_entries that has the first entry in this metaslab. On
+ * return, it will be set to the first entry after this metaslab.
+ */
+static void
+zdb_leak_init_ms(metaslab_t *msp, uint64_t *vim_idxp)
+{
+ metaslab_group_t *mg = msp->ms_group;
+ vdev_t *vd = mg->mg_vd;
+ vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+
+ mutex_enter(&msp->ms_lock);
+ metaslab_unload(msp);
+
+ /*
+ * We don't want to spend the CPU manipulating the size-ordered
+ * tree, so clear the range_tree ops.
+ */
+ msp->ms_tree->rt_ops = NULL;
+
+ (void) fprintf(stderr,
+ "\rloading vdev %llu of %llu, metaslab %llu of %llu ...",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)rvd->vdev_children,
+ (longlong_t)msp->ms_id,
+ (longlong_t)vd->vdev_ms_count);
+
+ /*
+ * For leak detection, we overload the metaslab ms_tree to
+ * contain allocated segments instead of free segments. As a
+ * result, we can't use the normal metaslab_load/unload
+ * interfaces.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
+ (*vim_idxp)++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[*vim_idxp];
+ uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
+ uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
+ ASSERT3U(ent_offset, >=, msp->ms_start);
+ if (ent_offset >= msp->ms_start + msp->ms_size)
+ break;
+
+ /*
+ * Mappings do not cross metaslab boundaries,
+ * because we create them by walking the metaslabs.
+ */
+ ASSERT3U(ent_offset + ent_len, <=,
+ msp->ms_start + msp->ms_size);
+ range_tree_add(msp->ms_tree, ent_offset, ent_len);
+ }
+ } else if (msp->ms_sm != NULL) {
+ VERIFY0(space_map_load(msp->ms_sm, msp->ms_tree, SM_ALLOC));
+ }
+
+ if (!msp->ms_loaded) {
+ msp->ms_loaded = B_TRUE;
+ }
+ mutex_exit(&msp->ms_lock);
+}
+
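+/*
+ * Callback for bpobj_iterate_nofree() over the obsolete bpobj: each
+ * entry is a block on an indirect vdev that has become obsolete, so
+ * bump the in-core obsolete count for the mapping segment covering it.
+ */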
+/* ARGSUSED */
+static int
+increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+ zdb_cb_t *zcb = arg;
+ spa_t *spa = zcb->zcb_spa;
+ vdev_t *vd;
+ const dva_t *dva = &bp->blk_dva[0];
+
+ ASSERT(!dump_opt['L']);
+ ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
+ ASSERT3P(vd, !=, NULL);
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
+ ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
+
+ vdev_indirect_mapping_increment_obsolete_count(
+ vd->vdev_indirect_mapping,
+ DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
+
+ return (0);
+}
+
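+/*
+ * Load an indirect vdev's obsolete counts, folding in its obsolete
+ * space map and, if a condense of this vdev was interrupted, the
+ * previous obsolete space map as well.
+ */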
+static uint32_t *
+zdb_load_obsolete_counts(vdev_t *vd)
+{
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ spa_t *spa = vd->vdev_spa;
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ uint32_t *counts;
+
+ EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL);
+ counts = vdev_indirect_mapping_load_obsolete_counts(vim);
+ if (vd->vdev_obsolete_sm != NULL) {
+ vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
+ vd->vdev_obsolete_sm);
+ }
+ if (scip->scip_vdev == vd->vdev_id &&
+ scip->scip_prev_obsolete_sm_object != 0) {
+ space_map_t *prev_obsolete_sm = NULL;
+ VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
+ scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
+ space_map_update(prev_obsolete_sm);
+ vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
+ prev_obsolete_sm);
+ space_map_close(prev_obsolete_sm);
+ }
+ return (counts);
+}
+
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
- ddt_bookmark_t ddb = { 0 };
+ ddt_bookmark_t ddb;
ddt_entry_t dde;
int error;
int p;
+ bzero(&ddb, sizeof (ddb));
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
- uint64_t c, m;
+ uint64_t c;
if (!dump_opt['L']) {
+ dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
+ zcb->zcb_vd_obsolete_counts =
+ umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
+ UMEM_NOFAIL);
+
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
- ASSERTV(metaslab_group_t *mg = vd->vdev_mg);
- for (m = 0; m < vd->vdev_ms_count; m++) {
- metaslab_t *msp = vd->vdev_ms[m];
- ASSERT3P(msp->ms_group, ==, mg);
- mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ uint64_t vim_idx = 0;
+
+ ASSERT3U(c, ==, vd->vdev_id);
+
+ /*
+ * Note: we don't check for mapping leaks on
+			 * removing vdevs because their ms_trees are
+ * used to look for leaks in allocated space.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ zcb->zcb_vd_obsolete_counts[c] =
+ zdb_load_obsolete_counts(vd);
/*
- * For leak detection, we overload the metaslab
- * ms_tree to contain allocated segments
- * instead of free segments. As a result,
- * we can't use the normal metaslab_load/unload
- * interfaces.
+ * Normally, indirect vdevs don't have any
+ * metaslabs. We want to set them up for
+ * zio_claim().
*/
- if (msp->ms_sm != NULL) {
- (void) fprintf(stderr,
- "\rloading space map for "
- "vdev %llu of %llu, "
- "metaslab %llu of %llu ...",
- (longlong_t)c,
- (longlong_t)rvd->vdev_children,
- (longlong_t)m,
- (longlong_t)vd->vdev_ms_count);
-
- /*
- * We don't want to spend the CPU
- * manipulating the size-ordered
- * tree, so clear the range_tree
- * ops.
- */
- msp->ms_tree->rt_ops = NULL;
- VERIFY0(space_map_load(msp->ms_sm,
- msp->ms_tree, SM_ALLOC));
-
- if (!msp->ms_loaded)
- msp->ms_loaded = B_TRUE;
- }
- mutex_exit(&msp->ms_lock);
+ VERIFY0(vdev_metaslab_init(vd, 0));
+ }
+
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
+ zdb_leak_init_ms(vd->vdev_ms[m], &vim_idx);
+ }
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ ASSERT3U(vim_idx, ==,
+ vdev_indirect_mapping_num_entries(
+ vd->vdev_indirect_mapping));
}
}
(void) fprintf(stderr, "\n");
+
+ if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
+ ASSERT(spa_feature_is_enabled(spa,
+ SPA_FEATURE_DEVICE_REMOVAL));
+ (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
+ increment_indirect_mapping_cb, zcb, NULL);
+ }
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
-static void
-zdb_leak_fini(spa_t *spa)
+static boolean_t
+zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
- int c, m;
+ boolean_t leaks = B_FALSE;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ uint64_t total_leaked = 0;
+
+ ASSERT(vim != NULL);
+
+ for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[i];
+ uint64_t obsolete_bytes = 0;
+ uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
+ metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ /*
+ * This is not very efficient but it's easy to
+ * verify correctness.
+ */
+ for (uint64_t inner_offset = 0;
+ inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
+ inner_offset += 1 << vd->vdev_ashift) {
+ if (range_tree_contains(msp->ms_tree,
+ offset + inner_offset, 1 << vd->vdev_ashift)) {
+ obsolete_bytes += 1 << vd->vdev_ashift;
+ }
+ }
+
+ int64_t bytes_leaked = obsolete_bytes -
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
+ ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
+ if (bytes_leaked != 0 &&
+ (vdev_obsolete_counts_are_precise(vd) ||
+ dump_opt['d'] >= 5)) {
+ (void) printf("obsolete indirect mapping count "
+ "mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
+ (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ (u_longlong_t)bytes_leaked);
+ }
+ total_leaked += ABS(bytes_leaked);
+ }
+
+ if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) {
+ int pct_leaked = total_leaked * 100 /
+ vdev_indirect_mapping_bytes_mapped(vim);
+ (void) printf("cannot verify obsolete indirect mapping "
+ "counts of vdev %llu because precise feature was not "
+		    "enabled when it was removed: %d%% (%llx bytes) of mapping "
+		    "unreferenced\n",
+ (u_longlong_t)vd->vdev_id, pct_leaked,
+ (u_longlong_t)total_leaked);
+ } else if (total_leaked > 0) {
+ (void) printf("obsolete indirect mapping count mismatch "
+ "for vdev %llu -- %llx total bytes mismatched\n",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)total_leaked);
+ leaks |= B_TRUE;
+ }
+
+ vdev_indirect_mapping_free_obsolete_counts(vim,
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
+
+ return (leaks);
+}
+
+static boolean_t
+zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
+{
+ boolean_t leaks = B_FALSE;
if (!dump_opt['L']) {
vdev_t *rvd = spa->spa_root_vdev;
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERTV(metaslab_group_t *mg = vd->vdev_mg);
- for (m = 0; m < vd->vdev_ms_count; m++) {
+
+ if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
+ leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
+ }
+
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(mg, ==, msp->ms_group);
- mutex_enter(&msp->ms_lock);
/*
* The ms_tree has been overloaded to
			 * represent allocated blocks that we
* did not claim during the traversal.
* Claimed blocks would have been removed
- * from the ms_tree.
+ * from the ms_tree. For indirect vdevs,
+ * space remaining in the tree represents
+ * parts of the mapping that are not
+ * referenced, which is not a bug.
*/
- range_tree_vacate(msp->ms_tree, zdb_leak, vd);
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ range_tree_vacate(msp->ms_tree,
+ NULL, NULL);
+ } else {
+ range_tree_vacate(msp->ms_tree,
+ zdb_leak, vd);
+ }
if (msp->ms_loaded)
msp->ms_loaded = B_FALSE;
-
- mutex_exit(&msp->ms_lock);
}
}
+
+ umem_free(zcb->zcb_vd_obsolete_counts,
+ rvd->vdev_children * sizeof (uint32_t *));
+ zcb->zcb_vd_obsolete_counts = NULL;
}
+ return (leaks);
}
/* ARGSUSED */
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
- int e, c;
+ int e, c, err;
bp_embedded_type_t i;
+ bzero(&zcb, sizeof (zcb));
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
count_block_cb, &zcb, NULL);
+
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
count_block_cb, &zcb, NULL);
}
+
+ zdb_claim_removing(spa, &zcb);
+
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
- zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
+ err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
}
}
+ /*
+ * Done after zio_wait() since zcb_haderrors is modified in
+ * zdb_blkptr_done()
+ */
+ zcb.zcb_haderrors |= err;
+
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
/*
* Report any leaked segments.
*/
- zdb_leak_fini(spa);
+ leaks |= zdb_leak_fini(spa, &zcb);
tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa));
- total_found = tzb->zb_asize - zcb.zcb_dedup_asize;
+ total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
+ zcb.zcb_removing_size;
if (total_found == total_alloc) {
if (!dump_opt['L'])
(longlong_t)tzb->zb_ditto_samevdev);
}
+ for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ if (vim == NULL) {
+ continue;
+ }
+
+ char mem[32];
+		zdb_nicenum(vdev_indirect_mapping_size(vim),
+		    mem, sizeof (mem));
+
+ (void) printf("\tindirect vdev id %llu has %llu segments "
+ "(%s in memory)\n",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
+ }
+
if (dump_opt['b'] >= 2) {
int l, t, level;
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
- char *typename;
+ const char *typename;
+
+ /* make sure nicenum has enough space */
+ CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
+ CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
zcb.zcb_type[ZB_TOTAL][t].zb_asize)
continue;
- zdb_nicenum(zb->zb_count, csize);
- zdb_nicenum(zb->zb_lsize, lsize);
- zdb_nicenum(zb->zb_psize, psize);
- zdb_nicenum(zb->zb_asize, asize);
- zdb_nicenum(zb->zb_asize / zb->zb_count, avg);
- zdb_nicenum(zb->zb_gangs, gang);
+ zdb_nicenum(zb->zb_count, csize,
+ sizeof (csize));
+ zdb_nicenum(zb->zb_lsize, lsize,
+ sizeof (lsize));
+ zdb_nicenum(zb->zb_psize, psize,
+ sizeof (psize));
+ zdb_nicenum(zb->zb_asize, asize,
+ sizeof (asize));
+ zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
+ sizeof (avg));
+ zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
- bzero(&ddh_total, sizeof (ddt_histogram_t));
- bzero(&dds_total, sizeof (ddt_stat_t));
-
+ bzero(&ddh_total, sizeof (ddh_total));
+ bzero(&dds_total, sizeof (dds_total));
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
dump_dedup_ratio(&dds_total);
}
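+/*
+ * Cross-check the device_removal and obsolete_counts feature refcounts
+ * against the objects actually found in the MOS; returns nonzero on a
+ * mismatch so dump_zpool() can propagate the error.
+ */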
+static int
+verify_device_removal_feature_counts(spa_t *spa)
+{
+ uint64_t dr_feature_refcount = 0;
+ uint64_t oc_feature_refcount = 0;
+ uint64_t indirect_vdev_count = 0;
+ uint64_t precise_vdev_count = 0;
+ uint64_t obsolete_counts_object_count = 0;
+ uint64_t obsolete_sm_count = 0;
+ uint64_t obsolete_counts_count = 0;
+ uint64_t scip_count = 0;
+ uint64_t obsolete_bpobj_count = 0;
+ int ret = 0;
+
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ if (scip->scip_next_mapping_object != 0) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
+ ASSERT(scip->scip_prev_obsolete_sm_object != 0);
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ (void) printf("Condensing indirect vdev %llu: new mapping "
+ "object %llu, prev obsolete sm %llu\n",
+ (u_longlong_t)scip->scip_vdev,
+ (u_longlong_t)scip->scip_next_mapping_object,
+ (u_longlong_t)scip->scip_prev_obsolete_sm_object);
+ if (scip->scip_prev_obsolete_sm_object != 0) {
+ space_map_t *prev_obsolete_sm = NULL;
+ VERIFY0(space_map_open(&prev_obsolete_sm,
+ spa->spa_meta_objset,
+ scip->scip_prev_obsolete_sm_object,
+ 0, vd->vdev_asize, 0));
+ space_map_update(prev_obsolete_sm);
+ dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
+ (void) printf("\n");
+ space_map_close(prev_obsolete_sm);
+ }
+
+ scip_count += 2;
+ }
+
+ for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+ if (vic->vic_mapping_object != 0) {
+ ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
+ vd->vdev_removing);
+ indirect_vdev_count++;
+
+ if (vd->vdev_indirect_mapping->vim_havecounts) {
+ obsolete_counts_count++;
+ }
+ }
+ if (vdev_obsolete_counts_are_precise(vd)) {
+ ASSERT(vic->vic_mapping_object != 0);
+ precise_vdev_count++;
+ }
+ if (vdev_obsolete_sm_object(vd) != 0) {
+ ASSERT(vic->vic_mapping_object != 0);
+ obsolete_sm_count++;
+ }
+ }
+
+ (void) feature_get_refcount(spa,
+ &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
+ &dr_feature_refcount);
+ (void) feature_get_refcount(spa,
+ &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
+ &oc_feature_refcount);
+
+ if (dr_feature_refcount != indirect_vdev_count) {
+ ret = 1;
+ (void) printf("Number of indirect vdevs (%llu) " \
+ "does not match feature count (%llu)\n",
+ (u_longlong_t)indirect_vdev_count,
+ (u_longlong_t)dr_feature_refcount);
+ } else {
+ (void) printf("Verified device_removal feature refcount " \
+ "of %llu is correct\n",
+ (u_longlong_t)dr_feature_refcount);
+ }
+
+ if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ) == 0) {
+ obsolete_bpobj_count++;
+ }
+
+ obsolete_counts_object_count = precise_vdev_count;
+ obsolete_counts_object_count += obsolete_sm_count;
+ obsolete_counts_object_count += obsolete_counts_count;
+ obsolete_counts_object_count += scip_count;
+ obsolete_counts_object_count += obsolete_bpobj_count;
+ obsolete_counts_object_count += remap_deadlist_count;
+
+ if (oc_feature_refcount != obsolete_counts_object_count) {
+ ret = 1;
+ (void) printf("Number of obsolete counts objects (%llu) " \
+ "does not match feature count (%llu)\n",
+ (u_longlong_t)obsolete_counts_object_count,
+ (u_longlong_t)oc_feature_refcount);
+ (void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
+ "ob:%llu rd:%llu\n",
+ (u_longlong_t)precise_vdev_count,
+ (u_longlong_t)obsolete_sm_count,
+ (u_longlong_t)obsolete_counts_count,
+ (u_longlong_t)scip_count,
+ (u_longlong_t)obsolete_bpobj_count,
+ (u_longlong_t)remap_deadlist_count);
+ } else {
+ (void) printf("Verified indirect_refcount feature refcount " \
+ "of %llu is correct\n",
+ (u_longlong_t)oc_feature_refcount);
+ }
+ return (ret);
+}
+
static void
dump_zpool(spa_t *spa)
{
dump_dir(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
+ dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
- dump_full_bpobj(
- &spa->spa_dsl_pool->dp_free_bpobj,
+ dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
+ if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
+ ASSERT(spa_feature_is_enabled(spa,
+ SPA_FEATURE_DEVICE_REMOVAL));
+ dump_full_bpobj(&dp->dp_obsolete_bpobj,
+ "Pool obsolete blocks", 0);
+ }
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
- spa->spa_dsl_pool->dp_bptree_obj,
+ dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
(longlong_t)refcount);
}
}
+
+ if (rc == 0) {
+ rc = verify_device_removal_feature_counts(spa);
+ }
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
#define ZDB_FLAG_RAW 0x0040
#define ZDB_FLAG_PRINT_BLKPTR 0x0080
-int flagbits[256];
+static int flagbits[256];
static void
zdb_print_blkptr(blkptr_t *bp, int flags)
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
- int nwords = size / sizeof (uint64_t);
+ unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
- int i, j;
- char *hdr, *c;
+ unsigned i, j;
+ const char *hdr;
+ char *c;
if (do_bswap)
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
-zdb_vdev_lookup(vdev_t *vdev, char *path)
+zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
- int i;
+ unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
- i = (int)strtoul(path, &s, 10);
+ i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
- if (i < 0 || i >= vdev->vdev_children)
+ if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
return (NULL);
}
-/* ARGSUSED */
-static int
-random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused)
-{
- return (random_get_pseudo_bytes(buf, len));
-}
-
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
- char *s, *p, *dup, *vdev, *flagstr;
+ const char *s, *vdev;
+ char *p, *dup, *flagstr;
int i, error;
boolean_t borrowed = B_FALSE;
s = strtok(NULL, ":");
size = strtoull(s ? s : "", NULL, 16);
s = strtok(NULL, ":");
- flagstr = s ? s : "";
+ if (s)
+ flagstr = strdup(s);
+ else
+ flagstr = strdup("");
s = NULL;
if (size == 0)
s = "offset must be a multiple of sector size";
if (s) {
(void) printf("Invalid block specifier: %s - %s\n", thing, s);
+ free(flagstr);
free(dup);
return;
}
}
if (*p != ':' && *p != '\0') {
(void) printf("***Invalid flag arg: '%s'\n", s);
+ free(flagstr);
free(dup);
return;
}
}
}
+ free(flagstr);
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
- ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL));
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
+ NULL, NULL));
}
error = zio_wait(zio);
* every decompress function at every inflated blocksize.
*/
enum zio_compress c;
- void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
- abd_copy_to_buf(pbuf2, pabd, psize);
-
- VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize,
- random_get_pseudo_bytes_cb, NULL));
-
- VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
- SPA_MAXBLOCKSIZE - psize));
-
/*
* XXX - On the one hand, with SPA_MAXBLOCKSIZE at 16MB,
* this could take a while and we should let the user know
for (lsize = psize + SPA_MINBLOCKSIZE;
lsize <= SPA_MAXBLOCKSIZE; lsize += SPA_MINBLOCKSIZE) {
for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
+			/*
+			 * ZLE can easily decompress a stream that was
+			 * not ZLE-compressed, so provide a way to skip
+			 * it (set ZDB_NO_ZLE in the environment).
+			 */
+ if (c == ZIO_COMPRESS_ZLE &&
+ getenv("ZDB_NO_ZLE"))
+ continue;
+
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize, (u_longlong_t)lsize,
zio_compress_table[c].ci_name);
+
+ /*
+ * We randomize lbuf2, and decompress to both
+ * lbuf and lbuf2. This way, we will know if
+			 * decompression filled the buffers exactly to lsize.
+ */
+ VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
+
if (zio_decompress_data(c, pabd,
lbuf, psize, lsize) == 0 &&
- zio_decompress_data_buf(c, pbuf2,
+ zio_decompress_data(c, pabd,
lbuf2, psize, lsize) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
if (c != ZIO_COMPRESS_FUNCTIONS)
break;
}
-
- umem_free(pbuf2, SPA_MAXBLOCKSIZE);
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
- if (lsize <= psize) {
+ if (lsize > SPA_MAXBLOCKSIZE) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
- char buf[SPA_MAXBLOCKSIZE];
+ char *buf;
int err;
- memset(&bp, 0, sizeof (blkptr_t));
+ buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+ bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
+ umem_free(buf, SPA_MAXBLOCKSIZE);
}
int
main(int argc, char **argv)
{
- int i, c;
+ int c;
struct rlimit rl = { 1024, 1024 };
spa_t *spa = NULL;
objset_t *os = NULL;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
- char *target;
+ char *target, *target_pool;
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int flags = ZFS_IMPORT_MISSING_LOG;
error = 0;
target = argv[0];
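+	/*
+	 * If the target is a dataset or snapshot (e.g. "tank/fs@snap"),
+	 * derive the pool name ("tank") by truncating at the first '/'
+	 * or '@'; import and open operate on the pool.
+	 */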
+ if (strpbrk(target, "/@") != NULL) {
+ size_t targetlen;
+
+ target_pool = strdup(target);
+ *strpbrk(target_pool, "/@") = '\0';
+
+ target_is_spa = B_FALSE;
+ targetlen = strlen(target);
+ if (targetlen && target[targetlen - 1] == '/')
+ target[targetlen - 1] = '\0';
+ } else {
+ target_pool = target;
+ }
+
if (dump_opt['e']) {
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
args.path = searchdirs;
args.can_be_active = B_TRUE;
- error = zpool_tryimport(g_zfs, target, &cfg, &args);
+ error = zpool_tryimport(g_zfs, target_pool, &cfg, &args);
+
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_REWIND_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
- error = spa_import(target, cfg, NULL,
+ error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
- if (strpbrk(target, "/@") != NULL) {
- size_t targetlen;
-
- target_is_spa = B_FALSE;
- targetlen = strlen(target);
- if (targetlen && target[targetlen - 1] == '/')
- target[targetlen - 1] = '\0';
- }
+ if (target_pool != target)
+ free(target_pool);
if (error == 0) {
if (target_is_spa || dump_opt['R']) {
}
} else {
error = open_objset(target, DMU_OST_ANY, FTAG, &os);
+ if (error == 0)
+ spa = dmu_objset_spa(os);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
+ /*
+ * Set the pool failure mode to panic in order to prevent the pool
+ * from suspending. A suspended I/O will have no way to resume and
+ * can prevent the zdb(8) command from terminating as expected.
+ */
+ if (spa != NULL)
+ spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
+
argv++;
argc--;
if (!dump_opt['R']) {
if (argc > 0) {
zopt_objects = argc;
zopt_object = calloc(zopt_objects, sizeof (uint64_t));
- for (i = 0; i < zopt_objects; i++) {
+ for (unsigned i = 0; i < zopt_objects; i++) {
errno = 0;
zopt_object[i] = strtoull(argv[i], NULL, 0);
if (zopt_object[i] == 0 && errno != 0)
flagbits['p'] = ZDB_FLAG_PHYS;
flagbits['r'] = ZDB_FLAG_RAW;
- for (i = 0; i < argc; i++)
+ for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}