* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
- * Copyright (c) 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* +----------------->| chunk N-1 |
* +-----------+
*
- * Linear buffers act exactly like normal buffers and are always mapped into the
- * kernel's virtual memory space, while scattered ABD data chunks are allocated
- * as physical pages and then mapped in only while they are actually being
- * accessed through one of the abd_* library functions. Using scattered ABDs
- * provides several benefits:
- *
- * (1) They avoid use of kmem_*, preventing performance problems where running
- * kmem_reap on very large memory systems never finishes and causes
- * constant TLB shootdowns.
- *
- * (2) Fragmentation is less of an issue since when we are at the limit of
- * allocatable space, we won't have to search around for a long free
- * hole in the VA space for large ARC allocations. Each chunk is mapped in
- * individually, so even if we weren't using segkpm (see next point) we
- * wouldn't need to worry about finding a contiguous address range.
- *
- * (3) Use of segkpm will avoid the need for map / unmap / TLB shootdown costs
- * on each ABD access. (If segkpm isn't available then we use all linear
- * ABDs to avoid this penalty.) See seg_kpm.c for more details.
- *
- * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
- * B_FALSE. However, it is not possible to use scattered ABDs if segkpm is not
- * available, which is the case on all 32-bit systems and any 64-bit systems
- * where kpm_enable is turned off.
- *
* In addition to directly allocating a linear or scattered ABD, it is also
* possible to create an ABD by requesting the "sub-ABD" starting at an offset
* within an existing ABD. In linear buffers this is simple (set abd_buf of
* compare, copy, read, write, and fill with zeroes. If you need a custom
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
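+ *
+ * For example, a minimal sketch (illustrative, not code from this file) of
+ * a callback that sums every byte of an ABD via abd_iterate_func():
+ *
+ *	static int
+ *	sum_cb(void *buf, size_t len, void *private)
+ *	{
+ *		uint64_t *sum = private;
+ *		const uint8_t *p = buf;
+ *		for (size_t i = 0; i < len; i++)
+ *			*sum += p[i];
+ *		return (0);
+ *	}
+ *
+ *	uint64_t sum = 0;
+ *	(void) abd_iterate_func(abd, 0, abd->abd_size, sum_cb, &sum);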
+ *
+ * As an additional feature, linear and scatter ABDs can be stitched together
+ * by using the gang ABD type (abd_alloc_gang()). This allows for multiple
+ * ABDs to be viewed as a singular ABD.
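+ *
+ * For example, a minimal sketch (illustrative, not code from this file) of
+ * stitching two existing ABDs together and treating them as one:
+ *
+ *	abd_t *gabd = abd_alloc_gang();
+ *	abd_gang_add(gabd, abd1, B_FALSE);
+ *	abd_gang_add(gabd, abd2, B_FALSE);
+ *	ASSERT3U(gabd->abd_size, ==, abd1->abd_size + abd2->abd_size);
+ *	abd_free(gabd);		(abd1 and abd2 remain valid, caller-owned)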
+ *
+ * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
+ * B_FALSE.
*/
-#include <sys/abd.h>
+#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
-#ifndef KMC_NOTOUCH
-#define KMC_NOTOUCH 0
-#endif
-
-typedef struct abd_stats {
- kstat_named_t abdstat_struct_size;
- kstat_named_t abdstat_scatter_cnt;
- kstat_named_t abdstat_scatter_data_size;
- kstat_named_t abdstat_scatter_chunk_waste;
- kstat_named_t abdstat_linear_cnt;
- kstat_named_t abdstat_linear_data_size;
-} abd_stats_t;
-
-static abd_stats_t abd_stats = {
- /* Amount of memory occupied by all of the abd_t struct allocations */
- { "struct_size", KSTAT_DATA_UINT64 },
- /*
- * The number of scatter ABDs which are currently allocated, excluding
- * ABDs which don't own their data (for instance the ones which were
- * allocated through abd_get_offset()).
- */
- { "scatter_cnt", KSTAT_DATA_UINT64 },
- /* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
- { "scatter_data_size", KSTAT_DATA_UINT64 },
- /*
- * The amount of space wasted at the end of the last chunk across all
- * scatter ABDs tracked by scatter_cnt.
- */
- { "scatter_chunk_waste", KSTAT_DATA_UINT64 },
- /*
- * The number of linear ABDs which are currently allocated, excluding
- * ABDs which don't own their data (for instance the ones which were
- * allocated through abd_get_offset() and abd_get_from_buf()). If an
- * ABD takes ownership of its buf then it will become tracked.
- */
- { "linear_cnt", KSTAT_DATA_UINT64 },
- /* Amount of data stored in all linear ABDs tracked by linear_cnt */
- { "linear_data_size", KSTAT_DATA_UINT64 },
-};
-
-#define ABDSTAT(stat) (abd_stats.stat.value.ui64)
-#define ABDSTAT_INCR(stat, val) \
- atomic_add_64(&abd_stats.stat.value.ui64, (val))
-#define ABDSTAT_BUMP(stat) ABDSTAT_INCR(stat, 1)
-#define ABDSTAT_BUMPDOWN(stat) ABDSTAT_INCR(stat, -1)
-
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
-
-#ifdef _KERNEL
-static kstat_t *abd_ksp;
-
-static struct page *
-abd_alloc_chunk(void)
-{
- struct page *c = alloc_page(kmem_flags_convert(KM_SLEEP));
- ASSERT3P(c, !=, NULL);
- return (c);
-}
-
-static void
-abd_free_chunk(struct page *c)
-{
- __free_pages(c, 0);
-}
-
-static void *
-abd_map_chunk(struct page *c)
-{
- /*
- * Use of segkpm means we don't care if this is mapped S_READ or S_WRITE
- * but S_WRITE is conceptually more accurate.
- */
- return (kmap(c));
-}
-
-static void
-abd_unmap_chunk(struct page *c)
-{
- kunmap(c);
-}
-
void
-abd_init(void)
-{
- abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
- sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
- if (abd_ksp != NULL) {
- abd_ksp->ks_data = &abd_stats;
- kstat_install(abd_ksp);
- }
-}
-
-void
-abd_fini(void)
-{
- if (abd_ksp != NULL) {
- kstat_delete(abd_ksp);
- abd_ksp = NULL;
- }
-}
-
-#else
-
-struct page;
-#define kpm_enable 1
-#define abd_alloc_chunk() \
- ((struct page *)kmem_alloc(PAGESIZE, KM_SLEEP))
-#define abd_free_chunk(chunk) kmem_free(chunk, PAGESIZE)
-#define abd_map_chunk(chunk) ((void *)chunk)
-static void
-abd_unmap_chunk(struct page *c)
-{
-}
-
-void
-abd_init(void)
-{
-}
-
-void
-abd_fini(void)
-{
-}
-
-#endif /* _KERNEL */
-
-static inline size_t
-abd_chunkcnt_for_bytes(size_t size)
-{
- return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
-}
-
-static inline size_t
-abd_scatter_chunkcnt(abd_t *abd)
-{
- ASSERT(!abd_is_linear(abd));
- return (abd_chunkcnt_for_bytes(
- abd->abd_u.abd_scatter.abd_offset + abd->abd_size));
-}
-
-static inline void
abd_verify(abd_t *abd)
{
- ASSERT3U(abd->abd_size, >, 0);
+#ifdef ZFS_DEBUG
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
- ABD_FLAG_OWNER | ABD_FLAG_META));
+ ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
+ ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
+ ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
- ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
- } else {
- size_t n;
- int i;
-
- ASSERT3U(abd->abd_u.abd_scatter.abd_offset, <, PAGESIZE);
- n = abd_scatter_chunkcnt(abd);
- for (i = 0; i < n; i++) {
- ASSERT3P(
- abd->abd_u.abd_scatter.abd_chunks[i], !=, NULL);
+ ASSERT3U(abd->abd_size, >, 0);
+ ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
+ } else if (abd_is_gang(abd)) {
+ uint_t child_sizes = 0;
+ for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
+ cabd != NULL;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ ASSERT(list_link_active(&cabd->abd_gang_link));
+ child_sizes += cabd->abd_size;
+ abd_verify(cabd);
}
+ ASSERT3U(abd->abd_size, ==, child_sizes);
+ } else {
+ ASSERT3U(abd->abd_size, >, 0);
+ abd_verify_scatter(abd);
}
+#endif
}
-static inline abd_t *
-abd_alloc_struct(size_t chunkcnt)
+static void
+abd_init_struct(abd_t *abd)
{
- size_t size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
- abd_t *abd = kmem_alloc(size, KM_PUSHPAGE);
- ASSERT3P(abd, !=, NULL);
- ABDSTAT_INCR(abdstat_struct_size, size);
+ list_link_init(&abd->abd_gang_link);
+ mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
+ abd->abd_flags = 0;
+#ifdef ZFS_DEBUG
+ zfs_refcount_create(&abd->abd_children);
+ abd->abd_parent = NULL;
+#endif
+ abd->abd_size = 0;
+}
+
+static void
+abd_fini_struct(abd_t *abd)
+{
+ mutex_destroy(&abd->abd_mtx);
+ ASSERT(!list_link_active(&abd->abd_gang_link));
+#ifdef ZFS_DEBUG
+ zfs_refcount_destroy(&abd->abd_children);
+#endif
+}
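+
+/*
+ * Allocate an abd_t structure and initialize the fields common to all ABD
+ * types. ABD_FLAG_ALLOCD records that abd_free() must also free the struct
+ * itself (via the OS-specific abd_free_struct_impl()).
+ */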
+abd_t *
+abd_alloc_struct(size_t size)
+{
+ abd_t *abd = abd_alloc_struct_impl(size);
+ abd_init_struct(abd);
+ abd->abd_flags |= ABD_FLAG_ALLOCD;
return (abd);
}
-static inline void
+void
abd_free_struct(abd_t *abd)
{
- size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd);
- int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
- kmem_free(abd, size);
- ABDSTAT_INCR(abdstat_struct_size, -size);
+ abd_fini_struct(abd);
+ abd_free_struct_impl(abd);
}
/*
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
- int i;
- size_t n;
- abd_t *abd;
-
- if (!zfs_abd_scatter_enabled)
+ if (abd_size_alloc_linear(size))
return (abd_alloc_linear(size, is_metadata));
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
- n = abd_chunkcnt_for_bytes(size);
- abd = abd_alloc_struct(n);
+ abd_t *abd = abd_alloc_struct(size);
+ abd->abd_flags |= ABD_FLAG_OWNER;
+ abd->abd_u.abd_scatter.abd_offset = 0;
+ abd_alloc_chunks(abd, size);
- abd->abd_flags = ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
- abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
-
- abd->abd_u.abd_scatter.abd_offset = 0;
- abd->abd_u.abd_scatter.abd_chunk_size = PAGESIZE;
- for (i = 0; i < n; i++) {
- void *c = abd_alloc_chunk();
- ASSERT3P(c, !=, NULL);
- abd->abd_u.abd_scatter.abd_chunks[i] = c;
- }
-
- ABDSTAT_BUMP(abdstat_scatter_cnt);
- ABDSTAT_INCR(abdstat_scatter_data_size, size);
- ABDSTAT_INCR(abdstat_scatter_chunk_waste,
- n * PAGESIZE - size);
+ abd_update_scatter_stats(abd, ABDSTAT_INCR);
return (abd);
}
-static void
-abd_free_scatter(abd_t *abd)
-{
- size_t n = abd_scatter_chunkcnt(abd);
- int i;
-
- for (i = 0; i < n; i++) {
- abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
- }
-
- refcount_destroy(&abd->abd_children);
- ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
- ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
- ABDSTAT_INCR(abdstat_scatter_chunk_waste,
- abd->abd_size - n * PAGESIZE);
-
- abd_free_struct(abd);
-}
-
/*
* Allocate an ABD that must be linear, along with its own underlying data
* buffer. Only use this when it would be very annoying to write your ABD
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
- abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
+ abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
- abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
if (is_metadata) {
- abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
+ ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
} else {
- abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
+ ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
}
- ABDSTAT_BUMP(abdstat_linear_cnt);
- ABDSTAT_INCR(abdstat_linear_data_size, size);
+ abd_update_linear_stats(abd, ABDSTAT_INCR);
return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
+ if (abd_is_linear_page(abd)) {
+ abd_free_linear_page(abd);
+ return;
+ }
if (abd->abd_flags & ABD_FLAG_META) {
- zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
+ zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
} else {
- zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
+ zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
}
- refcount_destroy(&abd->abd_children);
- ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
- ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
+ abd_update_linear_stats(abd, ABDSTAT_DECR);
+}
+
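+/*
+ * Remove each child from a gang ABD's chain, freeing any child marked
+ * ABD_FLAG_GANG_FREE, then destroy the chain itself.
+ */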
+static void
+abd_free_gang(abd_t *abd)
+{
+ ASSERT(abd_is_gang(abd));
+ abd_t *cabd;
+
+ while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
+ /*
+ * We must acquire the child ABDs mutex to ensure that if it
+ * is being added to another gang ABD we will set the link
+ * as inactive when removing it from this gang ABD and before
+ * adding it to the other gang ABD.
+ */
+ mutex_enter(&cabd->abd_mtx);
+ ASSERT(list_link_active(&cabd->abd_gang_link));
+ list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
+ mutex_exit(&cabd->abd_mtx);
+ if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
+ abd_free(cabd);
+ }
+ list_destroy(&ABD_GANG(abd).abd_gang_chain);
+}
- abd_free_struct(abd);
+static void
+abd_free_scatter(abd_t *abd)
+{
+ abd_free_chunks(abd);
+ abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
- * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
- * abd_alloc_linear().
+ * Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
+ * and abd_get_*(), including abd_get_offset_struct().
+ *
+ * If the ABD was created with abd_alloc_*(), the underlying data
+ * (scatterlist or linear buffer) will also be freed. (Subject to ownership
+ * changes via abd_*_ownership_of_buf().)
+ *
+ * Unless the ABD was created with abd_get_offset_struct(), the abd_t will
+ * also be freed.
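+ *
+ * For example (an illustrative sketch), pairing an allocation with a
+ * derived view:
+ *
+ *	abd_t *big = abd_alloc(16384, B_FALSE);
+ *	abd_t *part = abd_get_offset_size(big, 4096, 4096);
+ *	...
+ *	abd_free(part);		(frees only the abd_t; data is shared)
+ *	abd_free(big);		(frees the abd_t and the underlying buffer)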
*/
void
abd_free(abd_t *abd)
{
+ if (abd == NULL)
+ return;
+
abd_verify(abd);
- ASSERT3P(abd->abd_parent, ==, NULL);
- ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
- if (abd_is_linear(abd))
- abd_free_linear(abd);
- else
- abd_free_scatter(abd);
+#ifdef ZFS_DEBUG
+ IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
+#endif
+
+ if (abd_is_gang(abd)) {
+ abd_free_gang(abd);
+ } else if (abd_is_linear(abd)) {
+ if (abd->abd_flags & ABD_FLAG_OWNER)
+ abd_free_linear(abd);
+ } else {
+ if (abd->abd_flags & ABD_FLAG_OWNER)
+ abd_free_scatter(abd);
+ }
+
+#ifdef ZFS_DEBUG
+ if (abd->abd_parent != NULL) {
+ (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
+ abd->abd_size, abd);
+ }
+#endif
+
+ abd_fini_struct(abd);
+ if (abd->abd_flags & ABD_FLAG_ALLOCD)
+ abd_free_struct_impl(abd);
}
/*
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
- boolean_t is_metadata = (sabd->abd_flags | ABD_FLAG_META) != 0;
- if (abd_is_linear(sabd)) {
+ boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
+ if (abd_is_linear(sabd) &&
+ !abd_is_linear_page(sabd)) {
return (abd_alloc_linear(size, is_metadata));
} else {
return (abd_alloc(size, is_metadata));
}
/*
- * If we're going to use this ABD for doing I/O using the block layer, the
- * consumer of the ABD data doesn't care if it's scattered or not, and we don't
- * plan to store this ABD in memory for a long period of time, we should
- * allocate the ABD type that requires the least data copying to do the I/O.
- *
- * On Illumos this is linear ABDs, however if ldi_strategy() can ever issue I/Os
- * using a scatter/gather list we should switch to that and replace this call
- * with vanilla abd_alloc().
- *
- * On Linux the optimal thing to do would be to use abd_get_offset() and
- * construct a new ABD which shares the original pages thereby eliminating
- * the copy. But for the moment a new linear ABD is allocated until this
- * performance optimization can be implemented.
+ * Create a gang ABD that will be the head of a list of ABDs. This is used
+ * to "chain" scatter/gather lists together when constructing aggregated
+ * I/Os. To free this abd, abd_free() must be called.
*/
abd_t *
-abd_alloc_for_io(size_t size, boolean_t is_metadata)
+abd_alloc_gang(void)
{
- return (abd_alloc_linear(size, is_metadata));
+ abd_t *abd = abd_alloc_struct(0);
+ abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
+ list_create(&ABD_GANG(abd).abd_gang_chain,
+ sizeof (abd_t), offsetof(abd_t, abd_gang_link));
+ return (abd);
}
/*
- * Allocate a new ABD to point to offset off of sabd. It shares the underlying
- * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
- * any derived ABDs exist.
+ * Add a child gang ABD to a parent gang ABD's chained list.
*/
-abd_t *
-abd_get_offset(abd_t *sabd, size_t off)
+static void
+abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
- abd_t *abd;
+ ASSERT(abd_is_gang(pabd));
+ ASSERT(abd_is_gang(cabd));
- abd_verify(sabd);
- ASSERT3U(off, <=, sabd->abd_size);
+ if (free_on_free) {
+ /*
+ * If the parent is responsible for freeing the child gang
+ * ABD we will just splice the child's children ABD list to
+ * the parent's list and immediately free the child gang ABD
+ * struct. The children taken over from the child gang will
+ * retain all their free_on_free settings after being added to
+ * the parent's list.
+ */
+#ifdef ZFS_DEBUG
+ /*
+ * If cabd had an abd_parent, we have to drop it here. We can't
+ * transfer it to pabd, nor can we clear abd_size while the
+ * parent's child refcount still tracks it.
+ */
+ if (cabd->abd_parent != NULL) {
+ (void) zfs_refcount_remove_many(
+ &cabd->abd_parent->abd_children,
+ cabd->abd_size, cabd);
+ cabd->abd_parent = NULL;
+ }
+#endif
+ pabd->abd_size += cabd->abd_size;
+ cabd->abd_size = 0;
+ list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
+ &ABD_GANG(cabd).abd_gang_chain);
+ ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
+ abd_verify(pabd);
+ abd_free(cabd);
+ } else {
+ for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
+ child != NULL;
+ child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
+ /*
+ * We always pass B_FALSE for free_on_free as it is the
+ * original child gang ABDs responsibility to determine
+ * if any of its child ABDs should be free'd on the call
+ * to abd_free().
+ */
+ abd_gang_add(pabd, child, B_FALSE);
+ }
+ abd_verify(pabd);
+ }
+}
- if (abd_is_linear(sabd)) {
- abd = abd_alloc_struct(0);
+/*
+ * Add a child ABD to a gang ABD's chained list.
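+ *
+ * With free_on_free set, the gang ABD takes responsibility for the child:
+ * a later abd_free() on the gang will also free the child. Otherwise the
+ * caller remains responsible for freeing the child itself.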
+ */
+void
+abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
+{
+ ASSERT(abd_is_gang(pabd));
+ abd_t *child_abd = NULL;
+ /*
+ * If the child being added is a gang ABD, we will add the
+ * child's ABDs to the parent gang ABD. This allows us to account
+ * for the offset correctly in the parent gang ABD.
+ */
+ if (abd_is_gang(cabd)) {
+ ASSERT(!list_link_active(&cabd->abd_gang_link));
+ return (abd_gang_add_gang(pabd, cabd, free_on_free));
+ }
+ ASSERT(!abd_is_gang(cabd));
+
+ /*
+ * In order to verify that an ABD is not already part of
+ * another gang ABD, we must lock the child ABD's abd_mtx
+ * to check its abd_gang_link status. We unlock the abd_mtx
+ * only after it has been added to a gang ABD, which
+ * will update the abd_gang_link's status. See comment below
+ * for how an ABD can be in multiple gang ABDs simultaneously.
+ */
+ mutex_enter(&cabd->abd_mtx);
+ if (list_link_active(&cabd->abd_gang_link)) {
/*
- * Even if this buf is filesystem metadata, we only track that
- * if we own the underlying data buffer, which is not true in
- * this case. Therefore, we don't ever use ABD_FLAG_META here.
+ * If the child ABD is already part of another
+ * gang ABD then we must allocate a new
+ * ABD to use a separate link. We mark the newly
+ * allocated ABD with ABD_FLAG_GANG_FREE, before
+ * adding it to the gang ABD's list, to make the
+ * gang ABD aware that it is responsible to call
+ * abd_free(). We use abd_get_offset() in order
+ * to just allocate a new ABD but avoid copying the
+ * data over into the newly allocated ABD.
+ *
+ * An ABD may become part of multiple gang ABD's. For
+ * example, when writing ditto blocks, the same ABD
+ * is used to write 2 or 3 locations with 2 or 3
+ * zio_t's. Each of the zio's may be aggregated with
+ * different adjacent zio's. zio aggregation uses gang
+ * zio's, so the single ABD can become part of multiple
+ * gang zio's.
+ *
+ * The ASSERT below is to make sure that if
+ * free_on_free is passed as B_TRUE, the ABD cannot
+ * be in multiple gang ABDs. The gang ABD cannot
+ * be responsible for cleaning up the child ABD
+ * memory allocation if the ABD can be in
+ * multiple gang ABDs at one time.
*/
- abd->abd_flags = ABD_FLAG_LINEAR;
-
- abd->abd_u.abd_linear.abd_buf =
- (char *)sabd->abd_u.abd_linear.abd_buf + off;
+ ASSERT3B(free_on_free, ==, B_FALSE);
+ child_abd = abd_get_offset(cabd, 0);
+ child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
} else {
- size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;
- size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
- (new_offset / PAGESIZE);
+ child_abd = cabd;
+ if (free_on_free)
+ child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
+ }
+ ASSERT3P(child_abd, !=, NULL);
+
+ list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
+ mutex_exit(&cabd->abd_mtx);
+ pabd->abd_size += child_abd->abd_size;
+}
+
+/*
+ * Locate the ABD for the supplied offset in the gang ABD.
+ * Return a new offset relative to the returned ABD.
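+ *
+ * For example, if the first child is 4096 bytes long, an incoming *off of
+ * 6000 returns the second child with *off rewritten to 1904.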
+ */
+abd_t *
+abd_gang_get_offset(abd_t *abd, size_t *off)
+{
+ abd_t *cabd;
+
+ ASSERT(abd_is_gang(abd));
+ ASSERT3U(*off, <, abd->abd_size);
+ for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ if (*off >= cabd->abd_size)
+ *off -= cabd->abd_size;
+ else
+ return (cabd);
+ }
+ VERIFY3P(cabd, !=, NULL);
+ return (cabd);
+}
- abd = abd_alloc_struct(chunkcnt);
+/*
+ * Allocate a new ABD, using the provided struct (if non-NULL, and if
+ * circumstances allow - otherwise allocate the struct). The returned ABD will
+ * point to offset off of sabd. It shares the underlying buffer data with sabd.
+ * Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
+ */
+static abd_t *
+abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
+{
+ abd_verify(sabd);
+ ASSERT3U(off + size, <=, sabd->abd_size);
+ if (abd_is_linear(sabd)) {
+ if (abd == NULL)
+ abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
- abd->abd_flags = 0;
+ abd->abd_flags |= ABD_FLAG_LINEAR;
+
+ ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
+ } else if (abd_is_gang(sabd)) {
+ size_t left = size;
+ if (abd == NULL) {
+ abd = abd_alloc_gang();
+ } else {
+ abd->abd_flags |= ABD_FLAG_GANG;
+ list_create(&ABD_GANG(abd).abd_gang_chain,
+ sizeof (abd_t), offsetof(abd_t, abd_gang_link));
+ }
- abd->abd_u.abd_scatter.abd_offset = new_offset % PAGESIZE;
- abd->abd_u.abd_scatter.abd_chunk_size = PAGESIZE;
+ abd->abd_flags &= ~ABD_FLAG_OWNER;
+ for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
+ cabd != NULL && left > 0;
+ cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
+ int csize = MIN(left, cabd->abd_size - off);
- /* Copy the scatterlist starting at the correct offset */
- (void) memcpy(&abd->abd_u.abd_scatter.abd_chunks,
- &sabd->abd_u.abd_scatter.abd_chunks[new_offset / PAGESIZE],
- chunkcnt * sizeof (void *));
+ abd_t *nabd = abd_get_offset_size(cabd, off, csize);
+ abd_gang_add(abd, nabd, B_TRUE);
+ left -= csize;
+ off = 0;
+ }
+ ASSERT3U(left, ==, 0);
+ } else {
+ abd = abd_get_offset_scatter(abd, sabd, off, size);
}
- abd->abd_size = sabd->abd_size - off;
+ ASSERT3P(abd, !=, NULL);
+ abd->abd_size = size;
+#ifdef ZFS_DEBUG
abd->abd_parent = sabd;
- refcount_create(&abd->abd_children);
- (void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
-
+ (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+#endif
return (abd);
}
/*
- * Allocate a linear ABD structure for buf. You must free this with abd_put()
- * since the resulting ABD doesn't own its own buffer.
+ * Like abd_get_offset_size(), but memory for the abd_t is provided by the
+ * caller. Using this routine can improve performance by avoiding the cost
+ * of allocating memory for the abd_t struct, and updating the abd stats.
+ * Usually, the provided abd is returned, but in some circumstances (FreeBSD,
+ * if sabd is scatter and size is more than 2 pages) a new abd_t may need to
+ * be allocated. Therefore callers should be careful to use the returned
+ * abd_t*.
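+ *
+ * A minimal sketch of the intended use (hypothetical caller):
+ *
+ *	abd_t tmp_abd;
+ *	abd_t *c = abd_get_offset_struct(&tmp_abd, sabd, off, len);
+ *	...
+ *	abd_free(c);	(a caller-provided struct is not itself freed)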
+ */
+abd_t *
+abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
+{
+ abd_t *result;
+ abd_init_struct(abd);
+ result = abd_get_offset_impl(abd, sabd, off, size);
+ if (result != abd)
+ abd_fini_struct(abd);
+ return (result);
+}
+
+abd_t *
+abd_get_offset(abd_t *sabd, size_t off)
+{
+ size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
+ VERIFY3U(size, >, 0);
+ return (abd_get_offset_impl(NULL, sabd, off, size));
+}
+
+abd_t *
+abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
+{
+ ASSERT3U(off + size, <=, sabd->abd_size);
+ return (abd_get_offset_impl(NULL, sabd, off, size));
+}
+
+/*
+ * Return a scatter ABD of the requested size, containing only zeros.
+ */
+abd_t *
+abd_get_zeros(size_t size)
+{
+ ASSERT3P(abd_zero_scatter, !=, NULL);
+ ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+ return (abd_get_offset_size(abd_zero_scatter, 0, size));
+}
+
+/*
+ * Allocate a linear ABD structure for buf.
*/
abd_t *
abd_get_from_buf(void *buf, size_t size)
* own the underlying data buffer, which is not true in this case.
* Therefore, we don't ever use ABD_FLAG_META here.
*/
- abd->abd_flags = ABD_FLAG_LINEAR;
+ abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_size = size;
- abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
- abd->abd_u.abd_linear.abd_buf = buf;
+ ABD_LINEAR_BUF(abd) = buf;
return (abd);
}
-/*
- * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
- * free the underlying scatterlist or buffer.
- */
-void
-abd_put(abd_t *abd)
-{
- abd_verify(abd);
- ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
-
- if (abd->abd_parent != NULL) {
- (void) refcount_remove_many(&abd->abd_parent->abd_children,
- abd->abd_size, abd);
- }
-
- refcount_destroy(&abd->abd_children);
- abd_free_struct(abd);
-}
-
/*
* Get the raw buffer associated with a linear ABD.
*/
{
ASSERT(abd_is_linear(abd));
abd_verify(abd);
- return (abd->abd_u.abd_linear.abd_buf);
+ return (ABD_LINEAR_BUF(abd));
}
/*
} else {
buf = zio_buf_alloc(n);
}
- (void) refcount_add_many(&abd->abd_children, n, buf);
-
+#ifdef ZFS_DEBUG
+ (void) zfs_refcount_add_many(&abd->abd_children, n, buf);
+#endif
return (buf);
}
{
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
+#ifdef ZFS_DEBUG
+ (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
+#endif
if (abd_is_linear(abd)) {
ASSERT3P(buf, ==, abd_to_buf(abd));
} else {
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
- (void) refcount_remove_many(&abd->abd_children, n, buf);
}
void
abd_return_buf(abd, buf, n);
}
+void
+abd_release_ownership_of_buf(abd_t *abd)
+{
+ ASSERT(abd_is_linear(abd));
+ ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
+
+ /*
+	 * abd_free() needs to handle LINEAR_PAGE ABDs specially.
+	 * Since that flag does not survive the
+	 * abd_release_ownership_of_buf() -> abd_get_from_buf() ->
+	 * abd_take_ownership_of_buf() sequence, we don't allow releasing
+	 * these "linear but not zio_[data_]buf_alloc()'ed" ABDs.
+ */
+ ASSERT(!abd_is_linear_page(abd));
+
+ abd_verify(abd);
+
+ abd->abd_flags &= ~ABD_FLAG_OWNER;
+ /* Disable this flag since we no longer own the data buffer */
+ abd->abd_flags &= ~ABD_FLAG_META;
+
+ abd_update_linear_stats(abd, ABDSTAT_DECR);
+}
+
/*
* Give this ABD ownership of the buffer that it's storing. Can only be used on
* linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
abd->abd_flags |= ABD_FLAG_META;
}
- ABDSTAT_BUMP(abdstat_linear_cnt);
- ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
+ abd_update_linear_stats(abd, ABDSTAT_INCR);
}
-void
-abd_release_ownership_of_buf(abd_t *abd)
-{
- ASSERT(abd_is_linear(abd));
- ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
- abd_verify(abd);
-
- abd->abd_flags &= ~ABD_FLAG_OWNER;
- /* Disable this flag since we no longer own the data buffer */
- abd->abd_flags &= ~ABD_FLAG_META;
-
- ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
- ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
-}
-
-struct abd_iter {
- abd_t *iter_abd; /* ABD being iterated through */
- size_t iter_pos; /* position (relative to abd_offset) */
- void *iter_mapaddr; /* addr corresponding to iter_pos */
- size_t iter_mapsize; /* length of data valid at mapaddr */
-};
-
-static inline size_t
-abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
+/*
+ * Initializes an abd_iter based on whether the abd is a gang ABD
+ * or just a single ABD.
+ */
+static inline abd_t *
+abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
- ASSERT(!abd_is_linear(aiter->iter_abd));
- return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
- aiter->iter_pos) % PAGESIZE);
-}
+ abd_t *cabd = NULL;
-static inline size_t
-abd_iter_scatter_chunk_index(struct abd_iter *aiter)
-{
- ASSERT(!abd_is_linear(aiter->iter_abd));
- return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
- aiter->iter_pos) / PAGESIZE);
+ if (abd_is_gang(abd)) {
+ cabd = abd_gang_get_offset(abd, &off);
+ if (cabd) {
+ abd_iter_init(aiter, cabd);
+ abd_iter_advance(aiter, off);
+ }
+ } else {
+ abd_iter_init(aiter, abd);
+ abd_iter_advance(aiter, off);
+ }
+ return (cabd);
}
/*
- * Initialize the abd_iter.
+ * Advances an abd_iter. We have to be careful with gang ABDs as
+ * advancing could mean that we are at the end of a particular ABD and
+ * must grab the next ABD in the gang ABD's list.
*/
-static void
-abd_iter_init(struct abd_iter *aiter, abd_t *abd)
+static inline abd_t *
+abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
+ size_t len)
{
- abd_verify(abd);
- aiter->iter_abd = abd;
- aiter->iter_pos = 0;
- aiter->iter_mapaddr = NULL;
- aiter->iter_mapsize = 0;
+ abd_iter_advance(aiter, len);
+ if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
+ ASSERT3P(cabd, !=, NULL);
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
+ if (cabd) {
+ abd_iter_init(aiter, cabd);
+ abd_iter_advance(aiter, 0);
+ }
+ }
+ return (cabd);
}
-/*
- * Advance the iterator by a certain amount. Cannot be called when a chunk is
- * in use. This can be safely called when the aiter has already exhausted, in
- * which case this does nothing.
- */
-static void
-abd_iter_advance(struct abd_iter *aiter, size_t amount)
+int
+abd_iterate_func(abd_t *abd, size_t off, size_t size,
+ abd_iter_func_t *func, void *private)
{
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
- ASSERT0(aiter->iter_mapsize);
+ struct abd_iter aiter;
+ int ret = 0;
- /* There's nothing left to advance to, so do nothing */
- if (aiter->iter_pos == aiter->iter_abd->abd_size)
- return;
+ if (size == 0)
+ return (0);
- aiter->iter_pos += amount;
-}
+ abd_verify(abd);
+ ASSERT3U(off + size, <=, abd->abd_size);
-/*
- * Map the current chunk into aiter. This can be safely called when the aiter
- * has already exhausted, in which case this does nothing.
- */
-static void
-abd_iter_map(struct abd_iter *aiter)
-{
- void *paddr;
- size_t offset = 0;
+ abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
- ASSERT0(aiter->iter_mapsize);
+ while (size > 0) {
+ IMPLY(abd_is_gang(abd), c_abd != NULL);
- /* There's nothing left to iterate over, so do nothing */
- if (aiter->iter_pos == aiter->iter_abd->abd_size)
- return;
+ abd_iter_map(&aiter);
- if (abd_is_linear(aiter->iter_abd)) {
- offset = aiter->iter_pos;
- aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
- paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
- } else {
- size_t index = abd_iter_scatter_chunk_index(aiter);
- offset = abd_iter_scatter_chunk_offset(aiter);
- aiter->iter_mapsize = PAGESIZE - offset;
- paddr = abd_map_chunk(
- aiter->iter_abd->abd_u.abd_scatter.abd_chunks[index]);
- }
- aiter->iter_mapaddr = (char *)paddr + offset;
-}
+ size_t len = MIN(aiter.iter_mapsize, size);
+ ASSERT3U(len, >, 0);
-/*
- * Unmap the current chunk from aiter. This can be safely called when the aiter
- * has already exhausted, in which case this does nothing.
- */
-static void
-abd_iter_unmap(struct abd_iter *aiter)
-{
- /* There's nothing left to unmap, so do nothing */
- if (aiter->iter_pos == aiter->iter_abd->abd_size)
- return;
+ ret = func(aiter.iter_mapaddr, len, private);
- if (!abd_is_linear(aiter->iter_abd)) {
- /* LINTED E_FUNC_SET_NOT_USED */
- size_t index = abd_iter_scatter_chunk_index(aiter);
- abd_unmap_chunk(
- aiter->iter_abd->abd_u.abd_scatter.abd_chunks[index]);
- }
+ abd_iter_unmap(&aiter);
- ASSERT3P(aiter->iter_mapaddr, !=, NULL);
- ASSERT3U(aiter->iter_mapsize, >, 0);
+ if (ret != 0)
+ break;
- aiter->iter_mapaddr = NULL;
- aiter->iter_mapsize = 0;
+ size -= len;
+ c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
+ }
+
+ return (ret);
}
+#if defined(__linux__) && defined(_KERNEL)
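+/*
+ * Iterate over the ABD one page at a time, calling func with each page,
+ * the offset of the valid data within that page, and the number of valid
+ * bytes, until the callback returns nonzero or size bytes are consumed.
+ */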
int
-abd_iterate_func(abd_t *abd, size_t off, size_t size,
- abd_iter_func_t *func, void *private)
+abd_iterate_page_func(abd_t *abd, size_t off, size_t size,
+ abd_iter_page_func_t *func, void *private)
{
- int ret = 0;
struct abd_iter aiter;
+ int ret = 0;
+
+ if (size == 0)
+ return (0);
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
- abd_iter_init(&aiter, abd);
- abd_iter_advance(&aiter, off);
+ abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
- size_t len;
- abd_iter_map(&aiter);
+ IMPLY(abd_is_gang(abd), c_abd != NULL);
+
+ abd_iter_page(&aiter);
- len = MIN(aiter.iter_mapsize, size);
+ size_t len = MIN(aiter.iter_page_dsize, size);
ASSERT3U(len, >, 0);
- ret = func(aiter.iter_mapaddr, len, private);
+ ret = func(aiter.iter_page, aiter.iter_page_doff,
+ len, private);
- abd_iter_unmap(&aiter);
+ aiter.iter_page = NULL;
+ aiter.iter_page_doff = 0;
+ aiter.iter_page_dsize = 0;
if (ret != 0)
break;
size -= len;
- abd_iter_advance(&aiter, len);
+ c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
}
+#endif
struct buf_arg {
void *arg_buf;
&ba_ptr);
}
-/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
+ (void) private;
(void) memset(buf, 0, size);
return (0);
}
{
int ret = 0;
struct abd_iter daiter, saiter;
+ abd_t *c_dabd, *c_sabd;
+
+ if (size == 0)
+ return (0);
abd_verify(dabd);
abd_verify(sabd);
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
- abd_iter_init(&daiter, dabd);
- abd_iter_init(&saiter, sabd);
- abd_iter_advance(&daiter, doff);
- abd_iter_advance(&saiter, soff);
+ c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
+ c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
- size_t dlen, slen, len;
+ IMPLY(abd_is_gang(dabd), c_dabd != NULL);
+ IMPLY(abd_is_gang(sabd), c_sabd != NULL);
+
abd_iter_map(&daiter);
abd_iter_map(&saiter);
- dlen = MIN(daiter.iter_mapsize, size);
- slen = MIN(saiter.iter_mapsize, size);
- len = MIN(dlen, slen);
+ size_t dlen = MIN(daiter.iter_mapsize, size);
+ size_t slen = MIN(saiter.iter_mapsize, size);
+ size_t len = MIN(dlen, slen);
ASSERT(dlen > 0 || slen > 0);
ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
break;
size -= len;
- abd_iter_advance(&daiter, len);
- abd_iter_advance(&saiter, len);
+ c_dabd =
+ abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
+ c_sabd =
+ abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
}
-/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
+ (void) private;
(void) memcpy(dbuf, sbuf, size);
return (0);
}
abd_copy_off_cb, NULL);
}
-/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
+ (void) private;
return (memcmp(bufa, bufb, size));
}
abd_cmp_cb, NULL));
}
+/*
+ * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
+ *
+ * @cabds parity ABDs, must have equal size
+ * @dabd data ABD. Can be NULL (in this case @dsize = 0)
+ * @func_raidz_gen should be implemented so that its behaviour
+ * is the same whether it takes linear or scatter buffers
+ */
+void
+abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd, size_t off,
+ size_t csize, size_t dsize, const unsigned parity,
+ void (*func_raidz_gen)(void **, const void *, size_t, size_t))
+{
+ int i;
+ size_t len, dlen;
+ struct abd_iter caiters[3];
+ struct abd_iter daiter;
+ void *caddrs[3], *daddr;
+ unsigned long flags __maybe_unused = 0;
+ abd_t *c_cabds[3];
+ abd_t *c_dabd = NULL;
+
+ ASSERT3U(parity, <=, 3);
+ for (i = 0; i < parity; i++) {
+ abd_verify(cabds[i]);
+ ASSERT3U(off + csize, <=, cabds[i]->abd_size);
+ c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], off);
+ }
-#if defined(_KERNEL) && defined(HAVE_SPL)
-/* Tunable Parameters */
-module_param(zfs_abd_scatter_enabled, int, 0644);
-MODULE_PARM_DESC(zfs_abd_scatter_enabled,
- "Toggle whether ABD allocations must be linear.");
-#endif
+ if (dsize > 0) {
+ ASSERT(dabd);
+ abd_verify(dabd);
+ ASSERT3U(off + dsize, <=, dabd->abd_size);
+ c_dabd = abd_init_abd_iter(dabd, &daiter, off);
+ }
+
+ abd_enter_critical(flags);
+ while (csize > 0) {
+ len = csize;
+ for (i = 0; i < parity; i++) {
+ IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
+ abd_iter_map(&caiters[i]);
+ caddrs[i] = caiters[i].iter_mapaddr;
+ len = MIN(caiters[i].iter_mapsize, len);
+ }
+
+ if (dsize > 0) {
+ IMPLY(abd_is_gang(dabd), c_dabd != NULL);
+ abd_iter_map(&daiter);
+ daddr = daiter.iter_mapaddr;
+ len = MIN(daiter.iter_mapsize, len);
+ dlen = len;
+ } else {
+ daddr = NULL;
+ dlen = 0;
+ }
+
+ /* must be progressive */
+ ASSERT3U(len, >, 0);
+ /*
+	 * The iterated function likely will not do well if any
+	 * segment except the last one is not a multiple of 512 (raidz).
+ */
+ ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
+
+ func_raidz_gen(caddrs, daddr, len, dlen);
+
+ for (i = parity-1; i >= 0; i--) {
+ abd_iter_unmap(&caiters[i]);
+ c_cabds[i] =
+ abd_advance_abd_iter(cabds[i], c_cabds[i],
+ &caiters[i], len);
+ }
+
+ if (dsize > 0) {
+ abd_iter_unmap(&daiter);
+ c_dabd =
+ abd_advance_abd_iter(dabd, c_dabd, &daiter,
+ dlen);
+ dsize -= dlen;
+ }
+
+ csize -= len;
+ }
+ abd_exit_critical(flags);
+}
+
+/*
+ * Iterate over code ABDs and data reconstruction target ABDs and call
+ * @func_raidz_rec. Function maps at most 6 pages atomically.
+ *
+ * @cabds parity ABDs, must have equal size
+ * @tabds rec target ABDs, at most 3
+ * @tsize size of data target columns
+ * @func_raidz_rec expects syndrome data in target columns. Function
+ * reconstructs data and overwrites target columns.
+ */
+void
+abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
+ size_t tsize, const unsigned parity,
+ void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
+ const unsigned *mul),
+ const unsigned *mul)
+{
+ int i;
+ size_t len;
+ struct abd_iter citers[3];
+ struct abd_iter xiters[3];
+ void *caddrs[3], *xaddrs[3];
+ unsigned long flags __maybe_unused = 0;
+ abd_t *c_cabds[3];
+ abd_t *c_tabds[3];
+
+ ASSERT3U(parity, <=, 3);
+
+ for (i = 0; i < parity; i++) {
+ abd_verify(cabds[i]);
+ abd_verify(tabds[i]);
+ ASSERT3U(tsize, <=, cabds[i]->abd_size);
+ ASSERT3U(tsize, <=, tabds[i]->abd_size);
+ c_cabds[i] =
+ abd_init_abd_iter(cabds[i], &citers[i], 0);
+ c_tabds[i] =
+ abd_init_abd_iter(tabds[i], &xiters[i], 0);
+ }
+
+ abd_enter_critical(flags);
+ while (tsize > 0) {
+ len = tsize;
+ for (i = 0; i < parity; i++) {
+ IMPLY(abd_is_gang(cabds[i]), c_cabds[i] != NULL);
+ IMPLY(abd_is_gang(tabds[i]), c_tabds[i] != NULL);
+ abd_iter_map(&citers[i]);
+ abd_iter_map(&xiters[i]);
+ caddrs[i] = citers[i].iter_mapaddr;
+ xaddrs[i] = xiters[i].iter_mapaddr;
+ len = MIN(citers[i].iter_mapsize, len);
+ len = MIN(xiters[i].iter_mapsize, len);
+ }
+
+ /* must be progressive */
+ ASSERT3S(len, >, 0);
+ /*
+	 * The iterated function likely will not do well if any
+	 * segment except the last one is not a multiple of 512 (raidz).
+ */
+ ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
+
+ func_raidz_rec(xaddrs, len, caddrs, mul);
+
+ for (i = parity-1; i >= 0; i--) {
+ abd_iter_unmap(&xiters[i]);
+ abd_iter_unmap(&citers[i]);
+ c_tabds[i] =
+ abd_advance_abd_iter(tabds[i], c_tabds[i],
+ &xiters[i], len);
+ c_cabds[i] =
+ abd_advance_abd_iter(cabds[i], c_cabds[i],
+ &citers[i], len);
+ }
+
+ tsize -= len;
+ ASSERT3S(tsize, >=, 0);
+ }
+ abd_exit_critical(flags);
+}