/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 * Copyright (c) 2023, 2024, Klara Inc.
 */
/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into
 * the kernel's virtual memory space, while scattered ABD data chunks are
 * allocated as physical pages and then mapped in only while they are actually
 * being accessed through one of the abd_* library functions.  Using scattered
 * ABDs provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations.  Each chunk is mapped
 *     in individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space.  See abd_alloc_chunks() for details.
 */
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/mm_compat.h>
#include <linux/scatterlist.h>
#endif
#ifdef _KERNEL
#if defined(MAX_ORDER)
#define	ABD_MAX_ORDER	(MAX_ORDER)
#elif defined(MAX_PAGE_ORDER)
#define	ABD_MAX_ORDER	(MAX_PAGE_ORDER)
#endif
#else
#define	ABD_MAX_ORDER	(1)
#endif
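
/*
 * Illustration (not from the original source): on a typical x86_64 kernel
 * with 4K pages, MAX_ORDER (or MAX_PAGE_ORDER on newer kernels) is around
 * 10-11, so ABD_MAX_ORDER permits compound-page chunks up to a few
 * megabytes; in userspace builds it collapses to 1, i.e. order-0 pages only.
 */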
typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()).  If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order.  These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};
static struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;
#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's.  Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
static int zfs_abd_scatter_min_size = 512 * 3;
/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page.  This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;
/*
 * _KERNEL   - Will point to ZERO_PAGE if it is available or it will be
 *             an allocated zero'd PAGESIZE buffer.
 * Userspace - Will be an allocated zero'ed PAGESIZE buffer.
 *
 * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;
static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
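
/*
 * Worked example (illustrative, assuming PAGESIZE == 4096):
 * abd_chunkcnt_for_bytes(6000) == P2ROUNDUP(6000, 4096) / 4096 == 2,
 * i.e. a 6000-byte ABD needs two pages, with the tail of the second
 * page counted as waste by abd_update_scatter_stats().
 */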
abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	(void) size;
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}
void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
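
/*
 * Example (illustrative): with ABD_MAX_ORDER == 11, the default
 * zfs_abd_scatter_max_order is 10, so abd_alloc_chunks() may request
 * compound pages of up to 2^10 pages (4MB with 4K pages) per chunk.
 */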
#ifdef _KERNEL
/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
	put_page(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */
#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif
/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone.  Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction.  When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	unsigned int max_order = MIN(zfs_abd_scatter_max_order,
	    ABD_MAX_ORDER - 1);
	unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
	unsigned int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	unsigned int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	ASSERT3U(alloc_pages, <, nr_pages);

	while (alloc_pages < nr_pages) {
		unsigned int chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else
				max_order = MAX(0, order - 1);
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);

		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);
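
	/*
	 * Worked example (illustrative, 4K pages): for a 128K ABD,
	 * nr_pages == 32, so the first pass requests order
	 * MIN(highbit64(32) - 1, max_order) == 5, a single 32-page chunk.
	 * If that compound allocation fails, max_order drops to 4 and the
	 * remainder is satisfied with smaller chunks, degenerating to
	 * order-0 pages in the worst case.
	 */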
	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}
	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);
	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer.  All single-page (4K) ABD's can be
		 * represented this way.  Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages).  This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization can not be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}
#else

/*
 * Allocate N individual pages to construct a scatter ABD.  This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces.  It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
	abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
}
#endif /* !CONFIG_HIGHMEM */
/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}
void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}
/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);
#else
	abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
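
/*
 * Note (illustrative): with the default 16M SPA_MAXBLOCKSIZE and 4K pages,
 * abd_zero_scatter carries 4096 scatterlist entries, every one of which
 * points at the same abd_zero_page, so the 16M "buffer" costs one page.
 */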
#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT	(highbit64(PAGESIZE)-1)
#endif
#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))
struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}
/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}
#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))
static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}
void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		struct page *p = nth_page(sg_page(sg), 0);
		umem_free_aligned(p, PAGESIZE);
	}
	abd_free_sg_table(abd);
}
static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
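
/*
 * Example (illustrative): with the default zfs_abd_scatter_min_size of
 * 1536, a 512B or 1KB request returns B_TRUE (linear, kmem-cache backed)
 * while 1.5KB and larger return B_FALSE (scatter, page backed), matching
 * the policy described above zfs_abd_scatter_min_size.
 */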
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}
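
/*
 * Worked example (illustrative, 4K pages): a 6000-byte scatter ABD
 * occupies two pages, so waste == P2ROUNDUP(6000, 4096) - 6000 == 2192
 * bytes, charged to ARC_SPACE_ABD_CHUNK_WASTE on allocation and returned
 * on free.
 */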
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}
void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}
static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
	umem_free_aligned(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < ABD_MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}
void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < ABD_MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < ABD_MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < ABD_MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}
void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we
 * don't plan to store this ABD in memory for a long period of time, then
 * we should allocate the ABD type that requires the least data copying to
 * do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy.  But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	(void) size;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case.  Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}
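
/*
 * Worked example (illustrative): taking off == 10000 into a scatter ABD
 * whose first sg entry is 8192 bytes long leaves new_offset == 1808 in
 * the second entry, so the derived ABD shares the parent's pages from
 * that entry onward rather than copying any data.
 */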
/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	memset(aiter, 0, sizeof (struct abd_iter));
	aiter->iter_abd = abd;
	if (!abd_is_linear(abd)) {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}
/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	ASSERT3U(aiter->iter_pos, <=, aiter->iter_abd->abd_size);
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
 * Advance the iterator by a certain amount.  Cannot be called when a chunk is
 * in use.  This can be safely called when the aiter has already exhausted, in
 * which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	/*
	 * Ensure that last chunk is not in use.  abd_iterate_*() must clear
	 * this state (directly or abd_iter_unmap()) before advancing.
	 */
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);
	ASSERT3P(aiter->iter_page, ==, NULL);
	ASSERT0(aiter->iter_page_doff);
	ASSERT0(aiter->iter_page_dsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}
/*
 * Map the current chunk into aiter.  This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
 * Unmap the current chunk from aiter.  This can be safely called when the
 * aiter has already exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * Yield the next page struct and data offset and size within it, without
 * mapping it into the address space.
 */
void
abd_iter_page(struct abd_iter *aiter)
{
	if (abd_iter_at_end(aiter)) {
		aiter->iter_page = NULL;
		aiter->iter_page_doff = 0;
		aiter->iter_page_dsize = 0;
		return;
	}

	struct page *page;
	size_t doff, dsize;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);

		/* memory address at iter_pos */
		void *paddr = ABD_LINEAR_BUF(aiter->iter_abd) +
		    aiter->iter_pos;

		/* struct page for address */
		page = is_vmalloc_addr(paddr) ?
		    vmalloc_to_page(paddr) : virt_to_page(paddr);

		/* offset of address within the page */
		doff = offset_in_page(paddr);

		/* total data remaining in abd from this position */
		dsize = aiter->iter_abd->abd_size - aiter->iter_offset;
	} else {
		ASSERT(!abd_is_gang(aiter->iter_abd));

		/* current scatter page */
		page = sg_page(aiter->iter_sg);

		/* position within page */
		doff = aiter->iter_offset;

		/* remaining data in scatterlist */
		dsize = MIN(aiter->iter_sg->length - aiter->iter_offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);
	}
	if (PageTail(page)) {
		/*
		 * This page is part of a "compound page", which is a group of
		 * pages that can be referenced from a single struct page *.
		 * It's organised as a "head" page, followed by a series of
		 * "tail" pages.
		 *
		 * In OpenZFS, compound pages are allocated using the
		 * __GFP_COMP flag, which we get from scatter ABDs and SPL
		 * vmalloc slabs (ie >16K allocations).  So a great many of
		 * the IO buffers we get are going to be of this type.
		 *
		 * The tail pages are just regular PAGE_SIZE pages, and can be
		 * safely used as-is.  However, the head page has length
		 * covering itself and all the tail pages.  If this ABD chunk
		 * spans multiple pages, then we can use the head page and a
		 * >PAGE_SIZE length, which is far more efficient.
		 *
		 * To do this, we need to adjust the offset to be counted from
		 * the head page.  struct page for compound pages are stored
		 * contiguously, so we can just adjust by a simple offset.
		 */
		struct page *head = compound_head(page);
		doff += ((page - head) * PAGESIZE);
		page = head;
	}
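
	/*
	 * Worked example (illustrative, 4K pages): if this chunk is an
	 * order-3 compound page (8 pages, 32K) and iteration entered at
	 * tail page 2 with doff == 100, the adjustment yields the head
	 * page with doff == 2 * 4096 + 100, letting the caller consume
	 * the rest of the 32K chunk in one go.
	 */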
	/* final page and position within it */
	aiter->iter_page = page;
	aiter->iter_page_doff = doff;

	/* amount of data in the chunk, up to the end of the page */
	aiter->iter_page_dsize = MIN(dsize, page_size(page) - doff);
}
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}
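
/*
 * Worked example (illustrative, 4K pages): for a scatter ABD with
 * abd_offset == 512, size == 8192 and off == 0, pos == 512 and the
 * result is ((512 + 8192 + 4095) >> 12) - (512 >> 12) == 3 - 0 == 3
 * pages, since an 8K span that starts mid-page straddles three pages.
 */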
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well with 0-count pages; this is a safety
		 * net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}
/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}
/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off,
		    io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}
1243 module_param(zfs_abd_scatter_enabled
, int, 0644);
1244 MODULE_PARM_DESC(zfs_abd_scatter_enabled
,
1245 "Toggle whether ABD allocations must be linear.");
1246 module_param(zfs_abd_scatter_min_size
, int, 0644);
1247 MODULE_PARM_DESC(zfs_abd_scatter_min_size
,
1248 "Minimum size of scatter allocations.");
1250 module_param(zfs_abd_scatter_max_order
, uint
, 0644);
1251 MODULE_PARM_DESC(zfs_abd_scatter_max_order
,
1252 "Maximum order allocation used for a scatter ABD.");
1254 #endif /* _KERNEL */