#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
+#include <sys/vdev_initialize.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
+
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static char *ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
ztest_func_t ztest_device_removal;
-ztest_func_t ztest_remap_blocks;
ztest_func_t ztest_spa_checkpoint_create_discard;
+ztest_func_t ztest_initialize;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
ZTI_INIT(ztest_vdev_class_add, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
- ZTI_INIT(ztest_remap_blocks, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_checkpoint_create_discard, 1, &zopt_rarely),
+ ZTI_INIT(ztest_initialize, 1, &zopt_sometimes),
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
static kmutex_t ztest_vdev_lock;
static boolean_t ztest_device_removal_active = B_FALSE;
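+/* Set to B_TRUE once an error-free scrub completes; see ztest_scrub_impl() */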
+static boolean_t ztest_pool_scrubbed = B_FALSE;
static kmutex_t ztest_checkpoint_lock;
/*
};
static void usage(boolean_t) __NORETURN;
+static int ztest_scrub_impl(spa_t *spa);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* ZIL get_data callbacks
*/
+/* ARGSUSED */
static void
ztest_get_done(zgd_t *zgd, int error)
{
ztest_range_unlock((rl_t *)zgd->zgd_lr);
ztest_object_unlock(zd, object);
- if (error == 0 && zgd->zgd_bp)
- zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
-
umem_free(zgd, sizeof (*zgd));
}
{
objset_t *os = zd->zd_os;
+ /*
+ * We hold the ztest_vdev_lock so we don't cause problems with
+ * other threads that wish to remove a log device, such as
+ * ztest_device_removal().
+ */
+ mutex_enter(&ztest_vdev_lock);
+
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
(void) pthread_rwlock_unlock(&zd->zd_zilog_lock);
mutex_exit(&zd->zd_dirobj_lock);
+ mutex_exit(&ztest_vdev_lock);
}
/*
/*
* If we're configuring a RAIDZ device then make sure that the
- * the initial version is capable of supporting that feature.
+ * initial version is capable of supporting that feature.
*/
switch (ztest_opts.zo_raidz_parity) {
case 0:
mutex_enter(&ztest_vdev_lock);
- /* ensure we have a useable config; mirrors of raidz aren't supported */
+ /* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
mutex_exit(&ztest_vdev_lock);
return;
pguid = pvd->vdev_guid;
/*
- * If oldvd has siblings, then half of the time, detach it.
+ * If oldvd has siblings, then half of the time, detach it. Prior
+ * to the detach, the pool is scrubbed in order to prevent creating
+ * unrepairable blocks as a result of the data corruption injection.
*/
if (oldvd_has_siblings && ztest_random(2) == 0) {
spa_config_exit(spa, SCL_ALL, FTAG);
+
+ error = ztest_scrub_impl(spa);
+ if (error)
+ goto out;
+
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP && error != ZFS_ERR_CHECKPOINT_EXISTS &&
*/
txg_wait_synced(spa_get_dsl(spa), 0);
- while (spa->spa_vdev_removal != NULL)
+ while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
txg_wait_synced(spa_get_dsl(spa), 0);
} else {
mutex_exit(&ztest_vdev_lock);
VERIFY0(dsl_destroy_snapshot(name, B_TRUE));
} else {
error = dsl_destroy_head(name);
- /* There could be a hold on this dataset */
- if (error != EBUSY)
+ if (error == ENOSPC) {
+ /* There could be a checkpoint or insufficient slop space */
+ ztest_record_enospc(FTAG);
+ } else if (error != EBUSY) {
+ /* There could be a hold on this dataset */
ASSERT0(error);
+ }
}
return (0);
}
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
- dmu_assign_arcbuf_by_dbuf(bonus_db, off,
- bigbuf_arcbufs[j], tx);
+ VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
+ off, bigbuf_arcbufs[j], tx));
} else {
- dmu_assign_arcbuf_by_dbuf(bonus_db, off,
- bigbuf_arcbufs[2 * j], tx);
- dmu_assign_arcbuf_by_dbuf(bonus_db,
+ VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
+ off, bigbuf_arcbufs[2 * j], tx));
+ VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
- bigbuf_arcbufs[2 * j + 1], tx);
+ bigbuf_arcbufs[2 * j + 1], tx));
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);
dmu_tx_commit(tx);
/*
- * Generate a buch of random entries.
+ * Generate a bunch of random entries.
*/
ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
}
/*
- * Testcase to test the upgrading of a microzap to fatzap.
+ * Test case to test the upgrading of a microzap to fatzap.
*/
void
ztest_fzap(ztest_ds_t *zd, uint64_t id)
(void) pthread_rwlock_unlock(&ztest_name_lock);
}
-/* ARGSUSED */
-void
-ztest_remap_blocks(ztest_ds_t *zd, uint64_t id)
-{
- (void) pthread_rwlock_rdlock(&ztest_name_lock);
-
- int error = dmu_objset_remap_indirects(zd->zd_name);
- if (error == ENOSPC)
- error = 0;
- ASSERT0(error);
-
- (void) pthread_rwlock_unlock(&ztest_name_lock);
-}
-
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
ASSERT(leaves >= 1);
+ /*
+ * While ztest is running the number of leaves will not change. This
+ * is critical for the fault injection logic as it determines where
+ * errors can be safely injected such that they are always repairable.
+ *
+ * When restarting ztest a different number of leaves may be requested
+ * which will shift the regions to be damaged. This is fine as long
+ * as the pool has been scrubbed prior to using the new mapping.
+ * Failure to do so can result in non-repairable damage being injected.
+ */
+ if (ztest_pool_scrubbed == B_FALSE)
+ goto out;
+
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
spa_t *spa = ztest_spa;
objset_t *os = zd->zd_os;
ztest_od_t *od;
- uint64_t object, blocksize, txg, pattern, psize;
+ uint64_t object, blocksize, txg, pattern;
enum zio_checksum checksum = spa_dedup_checksum(spa);
dmu_buf_t *db;
dmu_tx_t *tx;
- abd_t *abd;
- blkptr_t blk;
- int copies = 2 * ZIO_DEDUPDITTO_MIN;
- int i;
-
- blocksize = ztest_random_blocksize();
- blocksize = MIN(blocksize, 2048); /* because we write so many */
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
- ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
/*
* Take the name lock as writer to prevent anyone else from changing
- * the pool and dataset properies we need to maintain during this test.
+ * the pool and dataset properties we need to maintain during this test.
*/
(void) pthread_rwlock_wrlock(&ztest_name_lock);
blocksize = od[0].od_blocksize;
pattern = zs->zs_guid ^ dds.dds_guid;
+ /*
+ * The number of copies written must always be greater than or
+ * equal to the threshold set by the dedupditto property. This
+ * is initialized in ztest_run() and then randomly changed by
+ * ztest_spa_prop_get_set(); these functions will never set it
+ * larger than 2 * ZIO_DEDUPDITTO_MIN.
+ */
+ int copies = 2 * ZIO_DEDUPDITTO_MIN;
+
+ /*
+ * The block size is limited by DMU_MAX_ACCESS (64MB), which
+ * caps the maximum transaction size. A block size of up to
+ * SPA_OLD_MAXBLOCKSIZE is allowed, which results in a maximum
+ * transaction size of 128K * 200 (copies) = ~25MB.
+ *
+ * The actual block size is checked here, rather than when it is
+ * requested above, because ztest_od_init() does not guarantee
+ * that the requested block size will be used.
+ */
+ if (blocksize > SPA_OLD_MAXBLOCKSIZE) {
+ (void) pthread_rwlock_unlock(&ztest_name_lock);
+ umem_free(od, sizeof (ztest_od_t));
+ return;
+ }
+
ASSERT(object != 0);
tx = dmu_tx_create(os);
/*
* Write all the copies of our block.
*/
- for (i = 0; i < copies; i++) {
+ for (int i = 0; i < copies; i++) {
uint64_t offset = i * blocksize;
int error = dmu_buf_hold(os, object, offset, FTAG, &db,
DMU_READ_NO_PREFETCH);
/*
* Find out what block we got.
*/
- VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
- DMU_READ_NO_PREFETCH));
- blk = *((dmu_buf_impl_t *)db)->db_blkptr;
+ VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
+ blkptr_t blk = *((dmu_buf_impl_t *)db)->db_blkptr;
dmu_buf_rele(db, FTAG);
/*
* Damage the block. Dedup-ditto will save us when we read it later.
*/
- psize = BP_GET_PSIZE(&blk);
- abd = abd_alloc_linear(psize, B_TRUE);
+ uint64_t psize = BP_GET_PSIZE(&blk);
+ abd_t *abd = abd_alloc_linear(psize, B_TRUE);
ztest_pattern_set(abd_to_buf(abd), psize, ~pattern);
(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
umem_free(od, sizeof (ztest_od_t));
}
+/*
+ * By design ztest will never inject uncorrectable damage into the pool.
+ * Issue a scrub, wait for it to complete, and verify there is never any
+ * persistent damage.
+ *
+ * Only after a full scrub has been completed is it safe to start injecting
+ * data corruption. See the comment in ztest_fault_inject().
+ */
+static int
+ztest_scrub_impl(spa_t *spa)
+{
+ int error = spa_scan(spa, POOL_SCAN_SCRUB);
+ if (error)
+ return (error);
+
+ while (dsl_scan_scrubbing(spa_get_dsl(spa)))
+ txg_wait_synced(spa_get_dsl(spa), 0);
+
+ if (spa_get_errlog_size(spa) > 0)
+ return (ECKSUM);
+
+ ztest_pool_scrubbed = B_TRUE;
+
+ return (0);
+}
+
/*
* Scrub the pool.
*/
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
+ int error;
/*
* Scrub in progress by device removal.
if (ztest_device_removal_active)
return;
+ /*
+ * Start a scrub, wait a moment, then force a restart.
+ */
(void) spa_scan(spa, POOL_SCAN_SCRUB);
- (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
- (void) spa_scan(spa, POOL_SCAN_SCRUB);
+ (void) poll(NULL, 0, 100);
+
+ error = ztest_scrub_impl(spa);
+ if (error == EBUSY)
+ error = 0;
+ ASSERT0(error);
}
/*
strcpy(bin, "zdb");
}
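+/*
+ * Walk the vdev tree rooted at vd and return a randomly selected
+ * concrete leaf vdev, skipping any children whose top-level vdev
+ * is in the process of being removed.
+ */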
+static vdev_t *
+ztest_random_concrete_vdev_leaf(vdev_t *vd)
+{
+ if (vd == NULL)
+ return (NULL);
+
+ if (vd->vdev_children == 0)
+ return (vd);
+
+ vdev_t *eligible[vd->vdev_children];
+ int eligible_idx = 0, i;
+ for (i = 0; i < vd->vdev_children; i++) {
+ vdev_t *cvd = vd->vdev_child[i];
+ if (cvd->vdev_top->vdev_removing)
+ continue;
+ if (cvd->vdev_children > 0 ||
+ (vdev_is_concrete(cvd) && !cvd->vdev_detached)) {
+ eligible[eligible_idx++] = cvd;
+ }
+ }
+ VERIFY(eligible_idx > 0);
+
+ uint64_t child_no = ztest_random(eligible_idx);
+ return (ztest_random_concrete_vdev_leaf(eligible[child_no]));
+}
+
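+/*
+ * Exercise vdev initialization: select a random concrete leaf vdev
+ * and issue a randomly chosen initialize command (start, cancel, or
+ * suspend) against it via spa_vdev_initialize().
+ */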
+/* ARGSUSED */
+void
+ztest_initialize(ztest_ds_t *zd, uint64_t id)
+{
+ spa_t *spa = ztest_spa;
+ int error = 0;
+
+ mutex_enter(&ztest_vdev_lock);
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+ /* Random leaf vdev */
+ vdev_t *rand_vd = ztest_random_concrete_vdev_leaf(spa->spa_root_vdev);
+ if (rand_vd == NULL) {
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+ mutex_exit(&ztest_vdev_lock);
+ return;
+ }
+
+ /*
+ * The random vdev we've selected may change as soon as we
+ * drop the spa_config_lock. We create local copies of things
+ * we're interested in.
+ */
+ uint64_t guid = rand_vd->vdev_guid;
+ char *path = strdup(rand_vd->vdev_path);
+ boolean_t active = rand_vd->vdev_initialize_thread != NULL;
+
+ zfs_dbgmsg("vd %p, guid %llu", rand_vd, guid);
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
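+ /* Randomly choose to start, cancel, or suspend the initialization. */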
+ uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS);
+
+ nvlist_t *vdev_guids = fnvlist_alloc();
+ nvlist_t *vdev_errlist = fnvlist_alloc();
+ fnvlist_add_uint64(vdev_guids, path, guid);
+ error = spa_vdev_initialize(spa, vdev_guids, cmd, vdev_errlist);
+ fnvlist_free(vdev_guids);
+ fnvlist_free(vdev_errlist);
+
+ switch (cmd) {
+ case POOL_INITIALIZE_CANCEL:
+ if (ztest_opts.zo_verbose >= 4) {
+ (void) printf("Cancel initialize %s", path);
+ if (!active)
+ (void) printf(" failed (no initialize active)");
+ (void) printf("\n");
+ }
+ break;
+ case POOL_INITIALIZE_DO:
+ if (ztest_opts.zo_verbose >= 4) {
+ (void) printf("Start initialize %s", path);
+ if (active && error == 0)
+ (void) printf(" failed (already active)");
+ else if (error != 0)
+ (void) printf(" failed (error %d)", error);
+ (void) printf("\n");
+ }
+ break;
+ case POOL_INITIALIZE_SUSPEND:
+ if (ztest_opts.zo_verbose >= 4) {
+ (void) printf("Suspend initialize %s", path);
+ if (!active)
+ (void) printf(" failed (no initialize active)");
+ (void) printf("\n");
+ }
+ break;
+ }
+ free(path);
+ mutex_exit(&ztest_vdev_lock);
+}
+
/*
* Verify pool integrity by running zdb.
*/
ztest_get_zdb_bin(bin, len);
(void) sprintf(zdb,
- "%s -bcc%s%s -G -d -U %s "
- "-o zfs_reconstruct_indirect_combinations_max=65536 %s",
+ "%s -bcc%s%s -G -d -Y -U %s %s",
bin,
ztest_opts.zo_verbose >= 3 ? "s" : "",
ztest_opts.zo_verbose >= 4 ? "v" : "",
}
zs->zs_enospc_count = 0;
+ /*
+ * If we were in the middle of ztest_device_removal() and were killed,
+ * we need to ensure the removal and scrub complete before running
+ * any tests that check ztest_device_removal_active. The removal will
+ * be restarted automatically when the spa is opened, but we need to
+ * initiate the scrub manually if it is not already in progress. Note
+ * that we always run the scrub whenever an indirect vdev exists
+ * because we have no way of knowing for sure if ztest_device_removal()
+ * fully completed its scrub before the pool was reimported.
+ */
+ if (spa->spa_removing_phys.sr_state == DSS_SCANNING ||
+ spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
+ while (spa->spa_removing_phys.sr_state == DSS_SCANNING)
+ txg_wait_synced(spa_get_dsl(spa), 0);
+
+ error = ztest_scrub_impl(spa);
+ if (error == EBUSY)
+ error = 0;
+ ASSERT0(error);
+ }
+
run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
}
/*
- * Wait for all of the tests to complete. We go in reverse order
- * so we don't close datasets while threads are still using them.
+ * Wait for all of the tests to complete.
*/
- for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
+ for (t = 0; t < ztest_opts.zo_threads; t++)
VERIFY0(thread_join(run_threads[t]));
+
+ /*
+ * Close all datasets. This must be done after all the threads
+ * are joined so we can be sure none of the datasets are in-use
+ * by any of the threads.
+ */
+ for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}