*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2017, Intel Corporation.
*/
#include <sys/sysmacros.h>
#include <sys/trace_zio.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
+#include <sys/cityhash.h>
/*
* ==========================================================================
int zio_delay_max = ZIO_DELAY_MAX;
-#define ZIO_PIPELINE_CONTINUE 0x100
-#define ZIO_PIPELINE_STOP 0x101
-
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
zio->io_abd, tmp, zio->io_size, size);
abd_return_buf_copy(data, tmp, size);
+ if (zio_injection_enabled && ret == 0)
+ ret = zio_handle_fault_injection(zio, EINVAL);
+
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
__attribute__((always_inline))
static inline void
-zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
+zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
+ zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
+
/*
- * Dispatch the parent zio in its own taskq so that
- * the child can continue to make progress. This also
- * prevents overflowing the stack when we have deeply nested
- * parent-child relationships.
+ * If we can tell the caller to execute this parent next, do
+ * so. Otherwise dispatch the parent zio as its own task.
+ *
+ * Having the caller execute the parent when possible reduces
+ * locking on the zio taskqs, reduces context switch
+ * overhead, and has no recursion penalty. Note that one
+ * read from disk typically causes at least 3 zio's: a
+ * zio_null(), the logical zio_read(), and then a physical
+ * zio. When the physical ZIO completes, we are able to call
+ * zio_done() on all 3 of these zio's from one invocation of
+ * zio_execute() by returning the parent back to
+ * zio_execute(). Since the parent isn't executed until this
+ * thread returns back to zio_execute(), the caller should do
+ * so promptly.
+ *
+ * In other cases, dispatching the parent prevents
+ * overflowing the stack when we have deeply nested
+ * parent-child relationships, as we do with the "mega zio"
+ * of writes for spa_sync(), and the chain of ZIL blocks.
*/
- zio_taskq_dispatch(pio, type, B_FALSE);
+ if (next_to_executep != NULL && *next_to_executep == NULL) {
+ *next_to_executep = pio;
+ } else {
+ zio_taskq_dispatch(pio, type, B_FALSE);
+ }
} else {
mutex_exit(&pio->io_lock);
}
zio->io_bookmark = *zb;
if (pio != NULL) {
+ if (zio->io_metaslab_class == NULL)
+ zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
- ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
- ASSERT(txg == spa_first_txg(spa) || txg == 0);
+ ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
+ spa_min_claim_txg(spa));
+ ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
- ASSERTV(metaslab_class_t *mc = spa_normal_class(pio->io_spa));
-
- ASSERT(mc->mc_alloc_throttle_enabled);
+ ASSERT(pio->io_metaslab_class != NULL);
+ ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
* ==========================================================================
*/
-static int
+static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (BP_IS_EMBEDDED(bp))
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
/*
* If we've been overridden and nopwrite is set then
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
zio->io_pipeline = zio->io_orig_pipeline;
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
if (zio->io_children_ready != NULL) {
/*
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
} else {
/*
* Round up compressed size up to the ashift
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
- ASSERT(psize != 0);
+ VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
+
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
return (B_FALSE);
}
-static int
+static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
void
static inline void
__zio_execute(zio_t *zio)
{
- zio->io_executor = curthread;
-
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
- int rv;
+
+ zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
- rv = zio_pipeline[highbit64(stage) - 1](zio);
- if (rv == ZIO_PIPELINE_STOP)
- return;
+ /*
+ * The zio pipeline stage returns the next zio to execute
+ * (typically the same as this one), or NULL if we should
+ * stop.
+ */
+ zio = zio_pipeline[highbit64(stage) - 1](zio);
- ASSERT(rv == ZIO_PIPELINE_CONTINUE);
+ if (zio == NULL)
+ return;
}
}
zio_nowait(zio);
}
-static int
+static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
static void
abd_put(zio->io_abd);
}
-static int
+static zio_t *
zio_write_gang_block(zio_t *pio)
{
spa_t *spa = pio->io_spa;
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
flags |= METASLAB_ASYNC_ALLOC;
- VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
+ VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
+ pio));
/*
* The logical zio has already placed a reservation for
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
- pio, flags));
+ pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
- &pio->io_alloc_list, pio);
+ &pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
* stage.
*/
metaslab_class_throttle_unreserve(mc,
- gbh_copies - copies, pio);
+ gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
- return (ZIO_PIPELINE_CONTINUE);
+ return (pio);
}
if (pio == gio) {
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
- zp.zp_copies, cio, flags));
+ zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
zio_nowait(zio);
- return (ZIO_PIPELINE_CONTINUE);
+ return (pio);
}
/*
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
-static int
+static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
/*
* If the checksums match then reset the pipeline so that we
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
mutex_exit(&pio->io_lock);
}
-static int
+static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio->io_vsd = dde;
if (ddp_self == NULL)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
ASSERT(zio->io_vsd == NULL);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
static boolean_t
ddt_exit(ddt);
}
-static int
+static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
}
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
zio->io_bp_override = NULL;
BP_ZERO(bp);
ddt_exit(ddt);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
if (dio)
zio_nowait(dio);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
ddt_entry_t *freedde; /* for debugging */
-static int
+static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
}
ddt_exit(ddt);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
*/
static zio_t *
-zio_io_to_allocate(spa_t *spa)
+zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
- ASSERT(MUTEX_HELD(&spa->spa_alloc_lock));
+ ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator]));
- zio = avl_first(&spa->spa_alloc_tree);
+ zio = avl_first(&spa->spa_alloc_trees[allocator]);
if (zio == NULL)
return (NULL);
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
- if (!metaslab_class_throttle_reserve(spa_normal_class(spa),
- zio->io_prop.zp_copies, zio, 0)) {
+ ASSERT3U(zio->io_allocator, ==, allocator);
+ if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
+ zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) {
return (NULL);
}
- avl_remove(&spa->spa_alloc_tree, zio);
+ avl_remove(&spa->spa_alloc_trees[allocator], zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
-static int
+static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
+ metaslab_class_t *mc;
+
+ /* locate an appropriate allocation class */
+ mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
+ zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
- !spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled ||
+ !mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
- mutex_enter(&spa->spa_alloc_lock);
-
+ zbookmark_phys_t *bm = &zio->io_bookmark;
+ /*
+ * We want to try to use as many allocators as possible to help improve
+ * performance, but we also want logically adjacent IOs to be physically
+ * adjacent to improve sequential read performance. We chunk each object
+ * into 2^20 block regions, and then hash based on the objset, object,
+ * level, and region to accomplish both of these goals.
+ */
+ zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object,
+ bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
+ mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]);
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
- avl_add(&spa->spa_alloc_tree, zio);
-
- nio = zio_io_to_allocate(zio->io_spa);
- mutex_exit(&spa->spa_alloc_lock);
-
- if (nio == zio)
- return (ZIO_PIPELINE_CONTINUE);
-
- if (nio != NULL) {
- ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE);
- /*
- * We are passing control to a new zio so make sure that
- * it is processed by a different thread. We do this to
- * avoid stack overflows that can occur when parents are
- * throttled and children are making progress. We allow
- * it to go to the head of the taskq since it's already
- * been waiting.
- */
- zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE);
- }
- return (ZIO_PIPELINE_STOP);
+ zio->io_metaslab_class = mc;
+ avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio);
+ nio = zio_io_to_allocate(spa, zio->io_allocator);
+ mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]);
+ return (nio);
}
-void
-zio_allocate_dispatch(spa_t *spa)
+static void
+zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
- mutex_enter(&spa->spa_alloc_lock);
- zio = zio_io_to_allocate(spa);
- mutex_exit(&spa->spa_alloc_lock);
+ mutex_enter(&spa->spa_alloc_locks[allocator]);
+ zio = zio_io_to_allocate(spa, allocator);
+ mutex_exit(&spa->spa_alloc_locks[allocator]);
if (zio == NULL)
return;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
-static int
+static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
- metaslab_class_t *mc = spa_normal_class(spa);
+ metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
+ /*
+ * If not already chosen, locate an appropriate allocation class
+ */
+ mc = zio->io_metaslab_class;
+ if (mc == NULL) {
+ mc = spa_preferred_class(spa, zio->io_size,
+ zio->io_prop.zp_type, zio->io_prop.zp_level,
+ zio->io_prop.zp_zpl_smallblk);
+ zio->io_metaslab_class = mc;
+ }
+
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
- &zio->io_alloc_list, zio);
+ &zio->io_alloc_list, zio, zio->io_allocator);
+
+ /*
+ * Fall back to the normal class when an alloc class is full
+ */
+ if (error == ENOSPC && mc != spa_normal_class(spa)) {
+ /*
+ * If throttling, transfer reservation over to normal class.
+ * The io_allocator slot can remain the same even though we
+ * are switching classes.
+ */
+ if (mc->mc_alloc_throttle_enabled &&
+ (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
+ metaslab_class_throttle_unreserve(mc,
+ zio->io_prop.zp_copies, zio->io_allocator, zio);
+ zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
+
+ mc = spa_normal_class(spa);
+ VERIFY(metaslab_class_throttle_reserve(mc,
+ zio->io_prop.zp_copies, zio->io_allocator, zio,
+ flags | METASLAB_MUST_RESERVE));
+ } else {
+ mc = spa_normal_class(spa);
+ }
+ zio->io_metaslab_class = mc;
+
+ error = metaslab_alloc(spa, mc, zio->io_size, bp,
+ zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
+ &zio->io_alloc_list, zio, zio->io_allocator);
+ }
if (error != 0) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %p, "
zio->io_error = error;
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
if (error)
zio->io_error = error;
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
+
+ /*
+ * Block pointer fields are useful to metaslabs for stats and debugging.
+ * Fill in the obvious ones before calling into metaslab_alloc().
+ */
+ BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
+ BP_SET_PSIZE(new_bp, size);
+ BP_SET_LEVEL(new_bp, 0);
+
+ /*
+ * When allocating a zil block, we don't have information about
+ * the final destination of the block except the objset it's part
+ * of, so we just hash the objset ID to pick the allocator to get
+ * some parallelism.
+ */
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
- txg, NULL, METASLAB_FASTWRITE, &io_alloc_list, NULL);
+ txg, NULL, METASLAB_FASTWRITE, &io_alloc_list, NULL,
+ cityhash4(0, 0, 0, os->os_dsl_dataset->ds_object) %
+ spa->spa_alloc_count);
if (error == 0) {
*slog = TRUE;
} else {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, METASLAB_FASTWRITE,
- &io_alloc_list, NULL);
+ &io_alloc_list, NULL, cityhash4(0, 0, 0,
+ os->os_dsl_dataset->ds_object) % spa->spa_alloc_count);
if (error == 0)
*slog = FALSE;
}
return (error);
}
-/*
- * Free an intent log block.
- */
-void
-zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
-{
- ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
- ASSERT(!BP_IS_GANG(bp));
-
- zio_free(spa, txg, bp);
-}
-
/*
* ==========================================================================
* Read and write to physical devices
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
-static int
+static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
if ((zio = vdev_queue_io(zio)) == NULL)
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
-static int
+static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
if (unexpected_error)
VERIFY(vdev_probe(vd, zio) == NULL);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
zcr->zcr_free = zio_abd_free;
}
-static int
+static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
/*
zio->io_physdone(zio->io_logical);
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
void
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
-static int
+static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
}
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
* Generate and verify checksums
* ==========================================================================
*/
-static int
+static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
-static int
+static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
}
}
}
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
* I/O completion
* ==========================================================================
*/
-static int
+static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
ZIO_WAIT_READY)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
if (zio->io_ready) {
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
+ ASSERT(zio->io_metaslab_class != NULL);
+
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
- spa_normal_class(zio->io_spa),
- zio->io_prop.zp_copies, zio);
- zio_allocate_dispatch(zio->io_spa);
+ zio->io_metaslab_class, zio->io_prop.zp_copies,
+ zio->io_allocator, zio);
+ zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
- zio_notify_parent(pio, zio, ZIO_WAIT_READY);
+ zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
- return (ZIO_PIPELINE_CONTINUE);
+ return (zio);
}
/*
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
+ ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
- metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags);
+ metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
+ pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
- metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa),
- 1, pio);
+ metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
+ pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
- zio_allocate_dispatch(zio->io_spa);
+ zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
-static int
+static zio_t *
zio_done(zio_t *zio)
{
/*
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
/*
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
- ASSERT(spa_normal_class(
- zio->io_spa)->mc_alloc_throttle_enabled);
+ ASSERT(zio->io_metaslab_class != NULL);
+ ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
- metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio);
- VERIFY(refcount_not_held(
- &(spa_normal_class(zio->io_spa)->mc_alloc_slots), zio));
+
+ metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
+ zio->io_allocator);
+ VERIFY(zfs_refcount_not_held(
+ &zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator],
+ zio));
}
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
- zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
+ /*
+ * This is a rare code path, so we don't
+ * bother with "next_to_execute".
+ */
+ zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
+ NULL);
}
}
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
- zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
+ /*
+ * This is a rare code path, so we don't bother with
+ * "next_to_execute".
+ */
+ zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
(task_func_t *)zio_reexecute, zio, 0,
&zio->io_tqent);
}
- return (ZIO_PIPELINE_STOP);
+ return (NULL);
}
ASSERT(zio->io_child_count == 0);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
+ /*
+ * We are done executing this zio. We may want to execute a parent
+ * next. See the comment in zio_notify_parent().
+ */
+ zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
- zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
+ zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
zio_destroy(zio);
}
- return (ZIO_PIPELINE_STOP);
+ return (next_to_execute);
}
/*