* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
+ * Copyright (c) 2023, 2024, Klara Inc.
*/
/*
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
+#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
+#include <linux/mm_compat.h>
#include <linux/scatterlist.h>
+#include <linux/version.h>
+#endif
+
+#ifdef _KERNEL
+#if defined(MAX_ORDER)
+#define ABD_MAX_ORDER (MAX_ORDER)
+#elif defined(MAX_PAGE_ORDER)
+#define ABD_MAX_ORDER (MAX_PAGE_ORDER)
+#endif
#else
-#define MAX_ORDER 1
+#define ABD_MAX_ORDER (1)
#endif
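+
+/*
+ * Note: Linux 6.8 renamed MAX_ORDER to MAX_PAGE_ORDER, so the shim above
+ * accepts whichever symbol the running kernel provides; in userspace a
+ * single order (one page at a time) is all we ever need.
+ */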
typedef struct abd_stats {
kstat_named_t abdstat_scatter_cnt;
kstat_named_t abdstat_scatter_data_size;
kstat_named_t abdstat_scatter_chunk_waste;
- kstat_named_t abdstat_scatter_orders[MAX_ORDER];
+ kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
kstat_named_t abdstat_scatter_page_multi_chunk;
kstat_named_t abdstat_scatter_page_multi_zone;
kstat_named_t abdstat_scatter_page_alloc_retry;
{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};
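+
+/*
+ * Write-mostly sums backing the kstat values above: cheap to bump on hot
+ * paths, and folded into abd_stats by the kstat update callback on read.
+ */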
+static struct {
+ wmsum_t abdstat_struct_size;
+ wmsum_t abdstat_linear_cnt;
+ wmsum_t abdstat_linear_data_size;
+ wmsum_t abdstat_scatter_cnt;
+ wmsum_t abdstat_scatter_data_size;
+ wmsum_t abdstat_scatter_chunk_waste;
+ wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
+ wmsum_t abdstat_scatter_page_multi_chunk;
+ wmsum_t abdstat_scatter_page_multi_zone;
+ wmsum_t abdstat_scatter_page_alloc_retry;
+ wmsum_t abdstat_scatter_sg_table_retry;
+} abd_sums;
+
#define abd_for_each_sg(abd, sg, n, i) \
for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
-unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
-
/*
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
* ABD's. Smaller allocations will use linear ABD's which uses
* By default we use linear allocations for 512B and 1KB, and scatter
* allocations for larger (1.5KB and up).
*/
-int zfs_abd_scatter_min_size = 512 * 3;
+static int zfs_abd_scatter_min_size = 512 * 3;
/*
* We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
struct page;
/*
- * abd_zero_page we will be an allocated zero'd PAGESIZE buffer, which is
- * assigned to set each of the pages of abd_zero_scatter.
+ * _KERNEL - Will point to ZERO_PAGE if it is available; otherwise it
+ * will be an allocated zero'd PAGESIZE buffer.
+ * Userspace - Will be an allocated zero'd PAGESIZE buffer.
+ *
+ * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
*/
static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
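+
+/*
+ * Convert a request size into a count of PAGESIZE chunks, rounding up.
+ * Illustrative example, assuming 4K pages: 6000 bytes rounds up to 8192,
+ * giving two chunks; an exact multiple such as 8192 also gives two.
+ */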
-static size_t
+static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
abd_t *
-abd_alloc_struct(size_t size)
+abd_alloc_struct_impl(size_t size)
{
/*
* In Linux we do not use the size passed in during ABD
* allocation, so we just ignore it.
*/
+ (void) size;
abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
- list_link_init(&abd->abd_gang_link);
- mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
return (abd);
}
void
-abd_free_struct(abd_t *abd)
+abd_free_struct_impl(abd_t *abd)
{
- mutex_destroy(&abd->abd_mtx);
- ASSERT(!list_link_active(&abd->abd_gang_link));
kmem_cache_free(abd_cache, abd);
ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
#ifdef _KERNEL
+static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
+
/*
* Mark zfs data pages so they can be excluded from kernel crash dumps
*/
struct page *page, *tmp_page = NULL;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
- int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
- int nr_pages = abd_chunkcnt_for_bytes(size);
- int chunks = 0, zones = 0;
+ unsigned int max_order = MIN(zfs_abd_scatter_max_order,
+ ABD_MAX_ORDER - 1);
+ unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
+ unsigned int chunks = 0, zones = 0;
size_t remaining_size;
int nid = NUMA_NO_NODE;
- int alloc_pages = 0;
+ unsigned int alloc_pages = 0;
INIT_LIST_HEAD(&pages);
+ ASSERT3U(alloc_pages, <, nr_pages);
+
while (alloc_pages < nr_pages) {
- unsigned chunk_pages;
- int order;
+ unsigned int chunk_pages;
+ unsigned int order;
order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
chunk_pages = (1U << order);
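+
+ /*
+ * Illustrative example: with 13 pages left to allocate and a
+ * max_order of at least 3, highbit64(13) - 1 == 3, so the next
+ * attempt is a single order-3 (eight page) chunk.
+ */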
struct scatterlist *sg = NULL;
struct sg_table table;
gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
- gfp_t gfp_zero_page = gfp | __GFP_ZERO;
int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
int i = 0;
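+
+ /*
+ * When HAVE_ZERO_PAGE_GPL_ONLY is defined the kernel's shared zero
+ * page is exported GPL-only, so we cannot borrow it and must allocate
+ * (and later free) a private zeroed page instead; otherwise we simply
+ * point abd_zero_page at ZERO_PAGE(0).
+ */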
+#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
+ gfp_t gfp_zero_page = gfp | __GFP_ZERO;
while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
schedule_timeout_interruptible(1);
}
abd_mark_zfs_page(abd_zero_page);
+#else
+ abd_zero_page = ZERO_PAGE(0);
+#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
while (sg_alloc_table(&table, nr_pages, gfp)) {
ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
ASSERT3U(table.nents, ==, nr_pages);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
- abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+ abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
- abd_zero_scatter->abd_parent = NULL;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
- zfs_refcount_create(&abd_zero_scatter->abd_children);
abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif
-#define zfs_kmap_atomic(chunk, km) ((void *)chunk)
-#define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
+#define zfs_kmap_atomic(chunk) ((void *)chunk)
+#define zfs_kunmap_atomic(addr) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
#define local_irq_restore(flags) do { (void)(flags); } while (0)
#define nth_page(pg, i) \
struct scatterlist *sg;
abd_for_each_sg(abd, sg, n, i) {
- for (int j = 0; j < sg->length; j += PAGESIZE) {
- struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
- umem_free(p, PAGESIZE);
- }
+ struct page *p = nth_page(sg_page(sg), 0);
+ umem_free_aligned(p, PAGESIZE);
}
abd_free_sg_table(abd);
}
abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
memset(abd_zero_page, 0, PAGESIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
- abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+ abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
- abd_zero_scatter->abd_parent = NULL;
- zfs_refcount_create(&abd_zero_scatter->abd_children);
ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
sizeof (struct scatterlist), KM_SLEEP);
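+
+/*
+ * Decide whether an allocation of this size should be linear. With the
+ * default zfs_abd_scatter_min_size of 1536 bytes, 512B and 1KB requests
+ * stay linear and 1.5KB and up are scattered, unless scatter ABDs are
+ * disabled entirely via zfs_abd_scatter_enabled.
+ */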
boolean_t
abd_size_alloc_linear(size_t size)
{
- return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
+ return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
+ int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
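+ /* e.g. with 4K pages, a 6000-byte ABD occupies 8192 bytes: waste = 2192 */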
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
- ABDSTAT_INCR(abdstat_scatter_chunk_waste,
- P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size);
+ ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
+ arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
} else {
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
- ABDSTAT_INCR(abdstat_scatter_chunk_waste,
- (int)abd->abd_size
- -(int)P2ROUNDUP(abd->abd_size, PAGESIZE));
+ ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
+ arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
}
}
static void
abd_free_zero_scatter(void)
{
- zfs_refcount_destroy(&abd_zero_scatter->abd_children);
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
abd_zero_scatter = NULL;
ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
+#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
abd_unmark_zfs_page(abd_zero_page);
__free_page(abd_zero_page);
+#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
- umem_free(abd_zero_page, PAGESIZE);
+ umem_free_aligned(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
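+
+/*
+ * kstat update callback: on read, fold the wmsum counters into the named
+ * kstat values; writes are rejected.
+ */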
+static int
+abd_kstats_update(kstat_t *ksp, int rw)
+{
+ abd_stats_t *as = ksp->ks_data;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+ as->abdstat_struct_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_struct_size);
+ as->abdstat_linear_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_cnt);
+ as->abdstat_linear_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_data_size);
+ as->abdstat_scatter_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_cnt);
+ as->abdstat_scatter_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_data_size);
+ as->abdstat_scatter_chunk_waste.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
+ for (int i = 0; i < ABD_MAX_ORDER; i++) {
+ as->abdstat_scatter_orders[i].value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
+ }
+ as->abdstat_scatter_page_multi_chunk.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
+ as->abdstat_scatter_page_multi_zone.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
+ as->abdstat_scatter_page_alloc_retry.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
+ as->abdstat_scatter_sg_table_retry.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
+ return (0);
+}
+
void
abd_init(void)
{
abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
+ wmsum_init(&abd_sums.abdstat_struct_size, 0);
+ wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
+ for (i = 0; i < ABD_MAX_ORDER; i++)
+ wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
+
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
- for (i = 0; i < MAX_ORDER; i++) {
+ for (i = 0; i < ABD_MAX_ORDER; i++) {
snprintf(abd_stats.abdstat_scatter_orders[i].name,
KSTAT_STRLEN, "scatter_order_%d", i);
abd_stats.abdstat_scatter_orders[i].data_type =
KSTAT_DATA_UINT64;
}
abd_ksp->ks_data = &abd_stats;
+ abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
abd_ksp = NULL;
}
+ wmsum_fini(&abd_sums.abdstat_struct_size);
+ wmsum_fini(&abd_sums.abdstat_linear_cnt);
+ wmsum_fini(&abd_sums.abdstat_linear_data_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_cnt);
+ wmsum_fini(&abd_sums.abdstat_scatter_data_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
+ for (int i = 0; i < ABD_MAX_ORDER; i++)
+ wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
+ wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
+
if (abd_cache) {
kmem_cache_destroy(abd_cache);
abd_cache = NULL;
ABD_SCATTER(abd).abd_sgl = sg;
abd_free_chunks(abd);
- zfs_refcount_destroy(&abd->abd_children);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
- abd_free_struct(abd);
}
/*
}
abd_t *
-abd_get_offset_scatter(abd_t *sabd, size_t off)
+abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
+ size_t size)
{
- abd_t *abd = NULL;
+ (void) size;
int i = 0;
struct scatterlist *sg = NULL;
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
- abd = abd_alloc_struct(0);
+ if (abd == NULL)
+ abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
- abd->abd_flags = 0;
abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
if (new_offset < sg->length)
{
ASSERT(!abd_is_gang(abd));
abd_verify(abd);
+ memset(aiter, 0, sizeof (struct abd_iter));
aiter->iter_abd = abd;
- aiter->iter_mapaddr = NULL;
- aiter->iter_mapsize = 0;
- aiter->iter_pos = 0;
- if (abd_is_linear(abd)) {
- aiter->iter_offset = 0;
- aiter->iter_sg = NULL;
- } else {
+ if (!abd_is_linear(abd)) {
aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
}
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
+ ASSERT3U(aiter->iter_pos, <=, aiter->iter_abd->abd_size);
return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
+ /*
+ * Ensure that the last chunk is not still in use. abd_iterate_*() must
+ * clear this state (directly or via abd_iter_unmap()) before advancing.
+ */
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
+ ASSERT3P(aiter->iter_page, ==, NULL);
+ ASSERT0(aiter->iter_page_doff);
+ ASSERT0(aiter->iter_page_dsize);
/* There's nothing left to advance to, so do nothing */
if (abd_iter_at_end(aiter))
aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
aiter->iter_abd->abd_size - aiter->iter_pos);
- paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg),
- km_table[aiter->iter_km]);
+ paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
}
aiter->iter_mapaddr = (char *)paddr + offset;
if (!abd_is_linear(aiter->iter_abd)) {
/* LINTED E_FUNC_SET_NOT_USED */
- zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset,
- km_table[aiter->iter_km]);
+ zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
}
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
}
#if defined(_KERNEL)
+/*
+ * Yield the next page struct, and the offset and size of the data within
+ * it, without mapping it into the address space.
+ */
+void
+abd_iter_page(struct abd_iter *aiter)
+{
+ if (abd_iter_at_end(aiter)) {
+ aiter->iter_page = NULL;
+ aiter->iter_page_doff = 0;
+ aiter->iter_page_dsize = 0;
+ return;
+ }
+
+ struct page *page;
+ size_t doff, dsize;
+
+ if (abd_is_linear(aiter->iter_abd)) {
+ ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
+
+ /* memory address at iter_pos */
+ void *paddr = ABD_LINEAR_BUF(aiter->iter_abd) + aiter->iter_pos;
+
+ /* struct page for address */
+ page = is_vmalloc_addr(paddr) ?
+ vmalloc_to_page(paddr) : virt_to_page(paddr);
+
+ /* offset of address within the page */
+ doff = offset_in_page(paddr);
+
+ /* total data remaining in abd from this position */
+ dsize = aiter->iter_abd->abd_size - aiter->iter_offset;
+ } else {
+ ASSERT(!abd_is_gang(aiter->iter_abd));
+
+ /* current scatter page */
+ page = sg_page(aiter->iter_sg);
+
+ /* position within page */
+ doff = aiter->iter_offset;
+
+ /* remaining data in scatterlist */
+ dsize = MIN(aiter->iter_sg->length - aiter->iter_offset,
+ aiter->iter_abd->abd_size - aiter->iter_pos);
+ }
+ ASSERT(page);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+ if (PageTail(page)) {
+ /*
+ * This page is part of a "compound page", which is a group of
+ * pages that can be referenced from a single struct page *.
+ * It's organised as a "head" page, followed by a series of
+ * "tail" pages.
+ *
+ * In OpenZFS, compound pages are allocated using the
+ * __GFP_COMP flag, which we get from scatter ABDs and SPL
+ * vmalloc slabs (i.e. >16K allocations). So a great many of the
+ * IO buffers we get are going to be of this type.
+ *
+ * The tail pages are just regular PAGE_SIZE pages, and can be
+ * safely used as-is. However, the head page has length
+ * covering itself and all the tail pages. If this ABD chunk
+ * spans multiple pages, then we can use the head page and a
+ * >PAGE_SIZE length, which is far more efficient.
+ *
+ * To do this, we need to adjust the offset to be counted from
+ * the head page. The struct page entries for compound pages are
+ * stored contiguously, so we can just adjust by a simple offset.
+ *
+ * Before kernel 4.5, compound page heads were refcounted
+ * separately, such that moving back to the head page would
+ * require us to take a reference to it and release it once
+ * we're completely finished with it. In practice, that means
+ * when our caller is done with the ABD, which we have no
+ * insight into from here. Rather than contort this API to
+ * track head page references on such ancient kernels, we just
+ * compile this block out and use the tail pages directly. This
+ * is slightly less efficient, but makes everything far
+ * simpler.
+ */
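+ /* e.g. third tail page: page - head == 3, so doff grows by 3 * PAGESIZE */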
+ struct page *head = compound_head(page);
+ doff += ((page - head) * PAGESIZE);
+ page = head;
+ }
+#endif
+
+ /* final page and position within it */
+ aiter->iter_page = page;
+ aiter->iter_page_doff = doff;
+
+ /* amount of data in the chunk, up to the end of the page */
+ aiter->iter_page_dsize = MIN(dsize, page_size(page) - doff);
+}
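+
+/*
+ * Typical (illustrative) use: after abd_iter_init() and abd_iter_advance(),
+ * call abd_iter_page() and consume up to iter_page_dsize bytes starting at
+ * offset iter_page_doff within iter_page, then advance and repeat.
+ */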
+
+/*
+ * Note: ABD BIO functions are only needed to support vdev_classic. See
+ * comments in vdev_disk.c.
+ */
+
/*
* bio_nr_pages for ABD.
* @off is the offset in @abd
{
unsigned long pos;
- while (abd_is_gang(abd))
- abd = abd_gang_get_offset(abd, &off);
+ if (abd_is_gang(abd)) {
+ unsigned long count = 0;
+
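+ /*
+ * Sum the page counts of every child that the [off, off + size)
+ * range touches; off only applies within the first child.
+ */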
+ for (abd_t *cabd = abd_gang_get_offset(abd, &off);
+ cabd != NULL && size != 0;
+ cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+ ASSERT3U(off, <, cabd->abd_size);
+ int mysize = MIN(size, cabd->abd_size - off);
+ count += abd_nr_pages_off(cabd, mysize, off);
+ size -= mysize;
+ off = 0;
+ }
+ return (count);
+ }
- ASSERT(!abd_is_gang(abd));
if (abd_is_linear(abd))
pos = (unsigned long)abd_to_buf(abd) + off;
else
pos = ABD_SCATTER(abd).abd_offset + off;
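+
+ /*
+ * Count the pages touched by [pos, pos + size). Illustrative
+ * example with 4K pages: pos == 7936 and size == 768 crosses the
+ * page boundary at 8192 and so touches two pages.
+ */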
- return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
- (pos >> PAGE_SHIFT);
+ return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
+ (pos >> PAGE_SHIFT));
}
static unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
unsigned int io_size, size_t off)
{
- int i;
struct abd_iter aiter;
ASSERT3U(io_size, <=, abd->abd_size - off);
abd_iter_init(&aiter, abd);
abd_iter_advance(&aiter, off);
- for (i = 0; i < bio->bi_max_vecs; i++) {
+ for (int i = 0; i < bio->bi_max_vecs; i++) {
struct page *pg;
size_t len, sgoff, pgoff;
struct scatterlist *sg;
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
"Maximum order allocation used for a scatter ABD.");
-#endif
+
+#endif /* _KERNEL */