#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
+#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
+#include <zfs_fletcher.h>
#include <libnvpair.h>
#ifdef __GLIBC__
#include <execinfo.h> /* for backtrace() */
uint64_t bt_magic;
uint64_t bt_objset;
uint64_t bt_object;
+ uint64_t bt_dnodesize;
uint64_t bt_offset;
uint64_t bt_gen;
uint64_t bt_txg;
uint64_t bw_data;
} bufwad_t;
-/*
- * XXX -- fix zfs range locks to be generic so we can use them here.
- */
-typedef enum {
- RL_READER,
- RL_WRITER,
- RL_APPEND
-} rl_type_t;
-
typedef struct rll {
void *rll_writer;
int rll_readers;
kcondvar_t rll_cv;
} rll_t;
-typedef struct rl {
- uint64_t rl_object;
- uint64_t rl_offset;
- uint64_t rl_size;
- rll_t *rl_lock;
-} rl_t;
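+/*
+ * A zll_t is one hash bucket of range-lockable objects: a mutex-
+ * protected list of per-object ztest_znode_t entries (defined with
+ * the lock mechanics further below), hashed by object number.
+ */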
+typedef struct zll {
+ list_t z_list;
+ kmutex_t z_lock;
+} zll_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
dmu_object_type_t od_crtype;
uint64_t od_blocksize;
uint64_t od_crblocksize;
+ uint64_t od_crdnodesize;
uint64_t od_gen;
uint64_t od_crgen;
char od_name[MAXNAMELEN];
char zd_name[MAXNAMELEN];
kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
- rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
+ zll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
+ztest_func_t ztest_fletcher;
+ztest_func_t ztest_verify_dnode_bt;
uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
+ ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
+ ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}
+static int
+ztest_random_dnodesize(void)
+{
+ int slots;
+ int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;
+
+ if (max_slots == DNODE_MIN_SLOTS)
+ return (DNODE_MIN_SIZE);
+
+ /*
+ * Weight the random distribution more heavily toward smaller
+ * dnode sizes since that is more likely to reflect real-world
+ * usage.
+ */
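+ /*
+ * The resulting split: half of all dnodes stay at the 512-byte
+ * minimum (one slot), 4/10 get two to four slots, and 1/10 get
+ * anywhere from five slots up to the pool maximum.
+ */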
+ ASSERT3U(max_slots, >, 4);
+ switch (ztest_random(10)) {
+ case 0:
+ slots = 5 + ztest_random(max_slots - 4);
+ break;
+ case 1 ... 4:
+ slots = 2 + ztest_random(3);
+ break;
+ default:
+ slots = 1;
+ break;
+ }
+
+ return (slots << DNODE_SHIFT);
+}
+
static int
ztest_random_ibshift(void)
{
return (error);
}
+
+/*
+ * Object and range lock mechanics: each object with an outstanding
+ * range lock gets a refcounted ztest_znode_t that wraps a real ZFS
+ * range lock (zfs_rlock_t).  Znodes live on hashed, mutex-protected
+ * lists (zll_t); one is created on first use and freed once the last
+ * range lock on its object is dropped.
+ */
+typedef struct {
+ list_node_t z_lnode; /* linkage in the zll_t bucket list */
+ refcount_t z_refcnt; /* one hold per outstanding range lock */
+ uint64_t z_object; /* object number */
+ zfs_rlock_t z_range_lock; /* backing ZFS range lock */
+} ztest_znode_t;
+
+typedef struct {
+ rl_t *z_rl; /* held range lock */
+ ztest_znode_t *z_ztznode; /* znode the lock was taken on */
+} ztest_zrl_t;
+
+static ztest_znode_t *
+ztest_znode_init(uint64_t object)
+{
+ ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);
+
+ list_link_init(&zp->z_lnode);
+ refcount_create(&zp->z_refcnt);
+ zp->z_object = object;
+ zfs_rlock_init(&zp->z_range_lock);
+
+ return (zp);
+}
+
+static void
+ztest_znode_fini(ztest_znode_t *zp)
+{
+ ASSERT(refcount_is_zero(&zp->z_refcnt));
+ zfs_rlock_destroy(&zp->z_range_lock);
+ zp->z_object = 0;
+ refcount_destroy(&zp->z_refcnt);
+ list_link_init(&zp->z_lnode);
+ umem_free(zp, sizeof (*zp));
+}
+
+static void
+ztest_zll_init(zll_t *zll)
+{
+ mutex_init(&zll->z_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&zll->z_list, sizeof (ztest_znode_t),
+ offsetof(ztest_znode_t, z_lnode));
+}
+
+static void
+ztest_zll_destroy(zll_t *zll)
+{
+ list_destroy(&zll->z_list);
+ mutex_destroy(&zll->z_lock);
+}
+
+#define RL_TAG "range_lock"
+static ztest_znode_t *
+ztest_znode_get(ztest_ds_t *zd, uint64_t object)
+{
+ zll_t *zll = &zd->zd_range_lock[object & (ZTEST_RANGE_LOCKS - 1)];
+ ztest_znode_t *zp = NULL;
+ mutex_enter(&zll->z_lock);
+ for (zp = list_head(&zll->z_list); zp != NULL;
+ zp = list_next(&zll->z_list, zp)) {
+ if (zp->z_object == object) {
+ refcount_add(&zp->z_refcnt, RL_TAG);
+ break;
+ }
+ }
+ if (zp == NULL) {
+ zp = ztest_znode_init(object);
+ refcount_add(&zp->z_refcnt, RL_TAG);
+ list_insert_head(&zll->z_list, zp);
+ }
+ mutex_exit(&zll->z_lock);
+ return (zp);
+}
+
+static void
+ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
+{
+ zll_t *zll = NULL;
+ ASSERT3U(zp->z_object, !=, 0);
+ zll = &zd->zd_range_lock[zp->z_object & (ZTEST_RANGE_LOCKS - 1)];
+ mutex_enter(&zll->z_lock);
+ refcount_remove(&zp->z_refcnt, RL_TAG);
+ if (refcount_is_zero(&zp->z_refcnt)) {
+ list_remove(&zll->z_list, zp);
+ ztest_znode_fini(zp);
+ }
+ mutex_exit(&zll->z_lock);
+}
+
static void
ztest_rll_init(rll_t *rll)
{
ztest_rll_unlock(rll);
}
-static rl_t *
-ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
- uint64_t size, rl_type_t type)
+static ztest_zrl_t *
+ztest_zrl_init(rl_t *rl, ztest_znode_t *zp)
{
- uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
- rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
- rl_t *rl;
-
- rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
- rl->rl_object = object;
- rl->rl_offset = offset;
- rl->rl_size = size;
- rl->rl_lock = rll;
-
- ztest_rll_lock(rll, type);
-
- return (rl);
+ ztest_zrl_t *zrl = umem_alloc(sizeof (*zrl), UMEM_NOFAIL);
+ zrl->z_rl = rl;
+ zrl->z_ztznode = zp;
+ return (zrl);
}
static void
-ztest_range_unlock(rl_t *rl)
+ztest_zrl_fini(ztest_zrl_t *zrl)
{
- rll_t *rll = rl->rl_lock;
+ umem_free(zrl, sizeof (*zrl));
+}
- ztest_rll_unlock(rll);
+static ztest_zrl_t *
+ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
+ uint64_t size, rl_type_t type)
+{
+ ztest_znode_t *zp = ztest_znode_get(zd, object);
+ rl_t *rl = zfs_range_lock(&zp->z_range_lock, offset,
+ size, type);
+ return (ztest_zrl_init(rl, zp));
+}
- umem_free(rl, sizeof (*rl));
+static void
+ztest_range_unlock(ztest_ds_t *zd, ztest_zrl_t *zrl)
+{
+ zfs_range_unlock(zrl->z_rl);
+ ztest_znode_put(zd, zrl->z_ztznode);
+ ztest_zrl_fini(zrl);
}
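+
+/*
+ * Typical pairing (illustrative sketch, mirroring the callers below):
+ *
+ *	ztest_zrl_t *rl;
+ *
+ *	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
+ *	... read or modify [offset, offset + size) ...
+ *	ztest_range_unlock(zd, rl);
+ */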
static void
ztest_rll_init(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
- ztest_rll_init(&zd->zd_range_lock[l]);
+ ztest_zll_init(&zd->zd_range_lock[l]);
}
static void
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
- ztest_rll_destroy(&zd->zd_range_lock[l]);
+ ztest_zll_destroy(&zd->zd_range_lock[l]);
}
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
- uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
+ uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
+ uint64_t crtxg)
{
bt->bt_magic = BT_MAGIC;
bt->bt_objset = dmu_objset_id(os);
bt->bt_object = object;
+ bt->bt_dnodesize = dnodesize;
bt->bt_offset = offset;
bt->bt_gen = gen;
bt->bt_txg = txg;
static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
- uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
+ uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
+ uint64_t crtxg)
{
ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
ASSERT3U(bt->bt_object, ==, object);
+ ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
ASSERT3U(bt->bt_offset, ==, offset);
ASSERT3U(bt->bt_gen, <=, gen);
ASSERT3U(bt->bt_txg, <=, txg);
return (bt);
}
+/*
+ * Generate a token to fill up unused bonus buffer space. Try to make
+ * it unique to the object, dataset, generation, and offset to verify
+ * that data is not getting overwritten by data from other dnodes.
+ */
+#define ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset) \
+ (((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))
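+
+/*
+ * For example, ZTEST_BONUS_FILL_TOKEN(5, 3, 7, 2) packs the dataset
+ * id (3) into bits 63-48, the generation (7) into bits 47-32, the
+ * object (5) into bits 31-8, and the offset (2) into the low byte,
+ * yielding the token 0x0003000700000502.
+ */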
+
+/*
+ * Fill up the unused bonus buffer region before the block tag with a
+ * verifiable pattern. Filling the whole bonus area with non-zero data
+ * helps ensure that all dnode traversal code properly skips the
+ * interior regions of large dnodes.
+ */
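+/*
+ * The block tag itself lives at the end of the bonus buffer (see
+ * ztest_bt_bonus()), so callers pass the tag pointer as "end" and
+ * everything before it gets token-filled.
+ */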
+void
+ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
+ objset_t *os, uint64_t gen)
+{
+ uint64_t *bonusp;
+
+ ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));
+
+ for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
+ uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
+ gen, bonusp - (uint64_t *)db->db_data);
+ *bonusp = token;
+ }
+}
+
+/*
+ * Verify that the unused area of a bonus buffer is filled with the
+ * expected tokens.
+ */
+void
+ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
+ objset_t *os, uint64_t gen)
+{
+ uint64_t *bonusp;
+
+ for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
+ uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
+ gen, bonusp - (uint64_t *)db->db_data);
+ VERIFY3U(*bonusp, ==, token);
+ }
+}
+
/*
* ZIL logging ops
*/
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
-#define lrz_bonuslen lr_crtime[1]
+#define lrz_dnodesize lr_crtime[1]
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
dmu_tx_t *tx;
uint64_t txg;
int error = 0;
+ int bonuslen;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
return (ENOSPC);
ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
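+ /* the usable bonus length scales with the dnode size */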
+ bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
if (lr->lr_foid == 0) {
- lr->lr_foid = zap_create(os,
+ lr->lr_foid = zap_create_dnsize(os,
lr->lrz_type, lr->lrz_bonustype,
- lr->lrz_bonuslen, tx);
+ bonuslen, lr->lrz_dnodesize, tx);
} else {
- error = zap_create_claim(os, lr->lr_foid,
+ error = zap_create_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, lr->lrz_bonustype,
- lr->lrz_bonuslen, tx);
+ bonuslen, lr->lrz_dnodesize, tx);
}
} else {
if (lr->lr_foid == 0) {
- lr->lr_foid = dmu_object_alloc(os,
+ lr->lr_foid = dmu_object_alloc_dnsize(os,
lr->lrz_type, 0, lr->lrz_bonustype,
- lr->lrz_bonuslen, tx);
+ bonuslen, lr->lrz_dnodesize, tx);
} else {
- error = dmu_object_claim(os, lr->lr_foid,
+ error = dmu_object_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, 0, lr->lrz_bonustype,
- lr->lrz_bonuslen, tx);
+ bonuslen, lr->lrz_dnodesize, tx);
}
}
VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
bbt = ztest_bt_bonus(db);
dmu_buf_will_dirty(db, tx);
- ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
+ ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
+ lr->lr_gen, txg, txg);
+ ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
dmu_buf_rele(db, FTAG);
VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
- rl_t *rl;
+ ztest_zrl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
- ztest_range_unlock(rl);
+ ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
- ztest_bt_verify(&rbt, os, lr->lr_foid,
+ ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
}
}
* as it was when the write was generated.
*/
if (zd->zd_zilog->zl_replay) {
- ztest_bt_verify(bt, os, lr->lr_foid, offset,
+ ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
bt->bt_crtxg);
}
* Set the bt's gen/txg to the bonus buffer's gen/txg
* so that all of the usual ASSERTs will work.
*/
- ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
+ ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
+ crtxg);
}
if (abuf == NULL) {
dmu_tx_commit(tx);
- ztest_range_unlock(rl);
+ ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
- rl_t *rl;
+ ztest_zrl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
- ztest_range_unlock(rl);
+ ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
dmu_tx_commit(tx);
- ztest_range_unlock(rl);
+ ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
dmu_tx_t *tx;
dmu_buf_t *db;
ztest_block_tag_t *bbt;
- uint64_t txg, lrtxg, crtxg;
+ uint64_t txg, lrtxg, crtxg, dnodesize;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
+ dnodesize = bbt->bt_dnodesize;
if (zd->zd_zilog->zl_replay) {
ASSERT(lr->lr_size != 0);
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
- ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
+ ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
MAX(txg, lrtxg), crtxg);
dmu_buf_will_dirty(db, tx);
VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
bbt = ztest_bt_bonus(db);
- ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
-
+ ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
+ txg, crtxg);
+ ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
dmu_buf_rele(db, FTAG);
(void) ztest_log_setattr(zd, tx, lr);
/*
* ZIL get_data callbacks
*/
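+/*
+ * State threaded through zgd_private so that ztest_get_done() can
+ * find the dataset, release the range lock, and drop the object
+ * lock taken in ztest_get_data().
+ */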
+typedef struct ztest_zgd_private {
+ ztest_ds_t *z_zd;
+ ztest_zrl_t *z_rl;
+ uint64_t z_object;
+} ztest_zgd_private_t;
static void
ztest_get_done(zgd_t *zgd, int error)
{
- ztest_ds_t *zd = zgd->zgd_private;
- uint64_t object = zgd->zgd_rl->rl_object;
+ ztest_zgd_private_t *zzp = zgd->zgd_private;
+ ztest_ds_t *zd = zzp->z_zd;
+ uint64_t object = zzp->z_object;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
- ztest_range_unlock(zgd->zgd_rl);
+ ztest_range_unlock(zd, zzp->z_rl);
ztest_object_unlock(zd, object);
if (error == 0 && zgd->zgd_bp)
zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
umem_free(zgd, sizeof (*zgd));
+ umem_free(zzp, sizeof (*zzp));
}
static int
dmu_buf_t *db;
zgd_t *zgd;
int error;
+ ztest_zgd_private_t *zgd_private;
ztest_object_lock(zd, object, RL_READER);
error = dmu_bonus_hold(os, object, FTAG, &db);
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_zilog = zd->zd_zilog;
- zgd->zgd_private = zd;
+ zgd_private = umem_zalloc(sizeof (ztest_zgd_private_t), UMEM_NOFAIL);
+ zgd_private->z_zd = zd;
+ zgd_private->z_object = object;
+ zgd->zgd_private = zgd_private;
if (buf != NULL) { /* immediate write */
- zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+ zgd_private->z_rl = ztest_range_lock(zd, object, offset, size,
RL_READER);
error = dmu_read(os, object, offset, size, buf,
offset = 0;
}
- zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+ zgd_private->z_rl = ztest_range_lock(zd, object, offset, size,
RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,
lr->lrz_blocksize = od->od_crblocksize;
lr->lrz_ibshift = ztest_random_ibshift();
lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
- lr->lrz_bonuslen = dmu_bonus_max();
+ lr->lrz_dnodesize = od->od_crdnodesize;
lr->lr_gen = od->od_crgen;
lr->lr_crtime[0] = time(NULL);
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
- rl_t *rl;
+ ztest_zrl_t *rl;
txg_wait_synced(dmu_objset_pool(os), 0);
(void) dmu_free_long_range(os, object, offset, size);
}
- ztest_range_unlock(rl);
+ ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, object);
}
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
- ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
+ ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
+ offset, 0, 0, 0);
(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
break;
*/
static void
ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
- dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
+ dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
+ uint64_t gen)
{
od->od_dir = ZTEST_DIROBJ;
od->od_object = 0;
od->od_crtype = type;
od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
+ od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
od->od_crgen = gen;
od->od_type = DMU_OT_NONE;
batchsize = OD_ARRAY_SIZE;
for (b = 0; b < batchsize; b++)
- ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER,
+ 0, 0, 0);
/*
* Destroy the previous batch of objects, create a new batch,
/*
* Read the directory info. If it's the first time, set things up.
*/
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
- ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize);
+ ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
+ chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
/*
* Read the directory info. If it's the first time, set things up.
*/
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
- ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
+ ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
+ chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
* to verify that parallel writes to an object -- even to the
 * same blocks within the object -- don't cause any trouble.
*/
- ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
return;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0) {
char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
int i;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
+ ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
int i, error = 0;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
umem_free(od, sizeof (ztest_od_t));
}
+/*
+ * Visit each object in the dataset. Verify that its properties are
+ * consistent with what was stored in the block tag when it was
+ * created, and that its unused bonus buffer space has not been
+ * overwritten.
+ */
+void
+ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id)
+{
+ objset_t *os = zd->zd_os;
+ uint64_t obj;
+ int err = 0;
+
+ for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
+ ztest_block_tag_t *bt = NULL;
+ dmu_object_info_t doi;
+ dmu_buf_t *db;
+
+ if (dmu_bonus_hold(os, obj, FTAG, &db) != 0)
+ continue;
+
+ dmu_object_info_from_db(db, &doi);
+ if (doi.doi_bonus_size >= sizeof (*bt))
+ bt = ztest_bt_bonus(db);
+
+ if (bt && bt->bt_magic == BT_MAGIC) {
+ ztest_bt_verify(bt, os, obj, doi.doi_dnodesize,
+ bt->bt_offset, bt->bt_gen, bt->bt_txg,
+ bt->bt_crtxg);
+ ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen);
+ }
+
+ dmu_buf_rele(db, FTAG);
+ }
+}
+
/* ARGSUSED */
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
blocksize = MIN(blocksize, 2048); /* because we write so many */
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
(void) rw_unlock(&ztest_name_lock);
}
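+
+/*
+ * Exercise the fletcher-4 checksum implementations: compute
+ * reference checksums of random buffers with the scalar
+ * implementation, then select the special "cycle" implementation,
+ * which rotates through the available implementations on successive
+ * calls, and verify that every one produces the same native and
+ * byteswapped checksums.
+ */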
+void
+ztest_fletcher(ztest_ds_t *zd, uint64_t id)
+{
+ hrtime_t end = gethrtime() + NANOSEC;
+
+ while (gethrtime() <= end) {
+ int run_count = 100;
+ void *buf;
+ uint32_t size;
+ int *ptr;
+ int i;
+ zio_cksum_t zc_ref;
+ zio_cksum_t zc_ref_byteswap;
+
+ size = ztest_random_blocksize();
+ buf = umem_alloc(size, UMEM_NOFAIL);
+
+ for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
+ *ptr = ztest_random(UINT_MAX);
+
+ VERIFY0(fletcher_4_impl_set("scalar"));
+ fletcher_4_native(buf, size, &zc_ref);
+ fletcher_4_byteswap(buf, size, &zc_ref_byteswap);
+
+ VERIFY0(fletcher_4_impl_set("cycle"));
+ while (run_count-- > 0) {
+ zio_cksum_t zc;
+ zio_cksum_t zc_byteswap;
+
+ fletcher_4_byteswap(buf, size, &zc_byteswap);
+ fletcher_4_native(buf, size, &zc);
+
+ VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
+ VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
+ sizeof (zc_byteswap)));
+ }
+
+ umem_free(buf, size);
+ }
+}
+
static int
ztest_check_path(char *path)
{
numloops++ < ztest_opts.zo_maxloops &&
metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) {
ztest_od_t od;
- ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE));
ztest_io(zd, od.od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);