 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */
/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into
 * the kernel's virtual memory space, while scattered ABD data chunks are
 * allocated as physical pages and then mapped in only while they are actually
 * being accessed through one of the abd_* library functions.  Using scattered
 * ABDs provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations.  Each chunk is mapped
 *     in individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space.  See abd_alloc_chunks() for details.
 */
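/*
 * Illustrative usage sketch (not part of the original file): consumers do not
 * normally care which representation they get and simply go through the
 * generic ABD API, e.g.:
 *
 *     abd_t *abd = abd_alloc(size, B_FALSE);   // scatter for larger sizes
 *     abd_copy_from_buf(abd, src, size);       // chunks mapped only as needed
 *     ...
 *     abd_free(abd);
 *
 * Whether the data lands in a linear buffer or in per-page chunks is decided
 * below, based on the allocation size and the tunables described in this file.
 */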
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#endif

#if defined(MAX_ORDER)
#define ABD_MAX_ORDER	(MAX_ORDER)
#elif defined(MAX_PAGE_ORDER)
#define ABD_MAX_ORDER	(MAX_PAGE_ORDER)
#else
#define ABD_MAX_ORDER	(1)
#endif
typedef struct abd_stats {
    kstat_named_t abdstat_struct_size;
    kstat_named_t abdstat_linear_cnt;
    kstat_named_t abdstat_linear_data_size;
    kstat_named_t abdstat_scatter_cnt;
    kstat_named_t abdstat_scatter_data_size;
    kstat_named_t abdstat_scatter_chunk_waste;
    kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
    kstat_named_t abdstat_scatter_page_multi_chunk;
    kstat_named_t abdstat_scatter_page_multi_zone;
    kstat_named_t abdstat_scatter_page_alloc_retry;
    kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
    /* Amount of memory occupied by all of the abd_t struct allocations */
    { "struct_size",			KSTAT_DATA_UINT64 },
    /*
     * The number of linear ABDs which are currently allocated, excluding
     * ABDs which don't own their data (for instance the ones which were
     * allocated through abd_get_offset() and abd_get_from_buf()).  If an
     * ABD takes ownership of its buf then it will become tracked.
     */
    { "linear_cnt",			KSTAT_DATA_UINT64 },
    /* Amount of data stored in all linear ABDs tracked by linear_cnt */
    { "linear_data_size",		KSTAT_DATA_UINT64 },
    /*
     * The number of scatter ABDs which are currently allocated, excluding
     * ABDs which don't own their data (for instance the ones which were
     * allocated through abd_get_offset()).
     */
    { "scatter_cnt",			KSTAT_DATA_UINT64 },
    /* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
    { "scatter_data_size",		KSTAT_DATA_UINT64 },
    /*
     * The amount of space wasted at the end of the last chunk across all
     * scatter ABDs tracked by scatter_cnt.
     */
    { "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
    /*
     * The number of compound allocations of a given order.  These
     * allocations are spread over all currently allocated ABDs, and
     * act as a measure of memory fragmentation.
     */
    { { "scatter_order_N",		KSTAT_DATA_UINT64 } },
    /*
     * The number of scatter ABDs which contain multiple chunks.
     * ABDs are preferentially allocated from the minimum number of
     * contiguous multi-page chunks; a single chunk is optimal.
     */
    { "scatter_page_multi_chunk",	KSTAT_DATA_UINT64 },
    /*
     * The number of scatter ABDs which are split across memory zones.
     * ABDs are preferentially allocated using pages from a single zone.
     */
    { "scatter_page_multi_zone",	KSTAT_DATA_UINT64 },
    /*
     * The total number of retries encountered when attempting to
     * allocate the pages to populate the scatter ABD.
     */
    { "scatter_page_alloc_retry",	KSTAT_DATA_UINT64 },
    /*
     * The total number of retries encountered when attempting to
     * allocate the sg table for an ABD.
     */
    { "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};
struct {
    wmsum_t abdstat_struct_size;
    wmsum_t abdstat_linear_cnt;
    wmsum_t abdstat_linear_data_size;
    wmsum_t abdstat_scatter_cnt;
    wmsum_t abdstat_scatter_data_size;
    wmsum_t abdstat_scatter_chunk_waste;
    wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
    wmsum_t abdstat_scatter_page_multi_chunk;
    wmsum_t abdstat_scatter_page_multi_zone;
    wmsum_t abdstat_scatter_page_alloc_retry;
    wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;
#define abd_for_each_sg(abd, sg, n, i)	\
    for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's.  Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
static int zfs_abd_scatter_min_size = 512 * 3;
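/*
 * Worked example of the default threshold (illustrative only): with
 * zfs_abd_scatter_min_size = 512 * 3 = 1536, a 512B or 1024B allocation
 * falls below the threshold and is allocated as a linear ABD from a kmem
 * cache, while a 1536B or larger allocation is allocated as a scatter ABD
 * (a 1536B scatter ABD occupies one 4K page, wasting 2560B of that page).
 */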
/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page.  This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

struct page;

/*
 * _KERNEL   - Will point to ZERO_PAGE if it is available or it will be
 *             an allocated zero'd PAGESIZE buffer.
 * Userspace - Will be an allocated zero'ed PAGESIZE buffer.
 *
 * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
    return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
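/*
 * Illustrative arithmetic (not in the original source): with PAGESIZE = 4096,
 * a 6000-byte request rounds up to P2ROUNDUP(6000, 4096) = 8192, so
 * abd_chunkcnt_for_bytes() returns 2 page-sized chunks; a 4096-byte request
 * returns exactly 1.
 */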
abd_t *
abd_alloc_struct_impl(size_t size)
{
    /*
     * In Linux we do not use the size passed in during ABD
     * allocation, so we just ignore it.
     */
    (void) size;
    abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
    ASSERT3P(abd, !=, NULL);
    ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

    return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
    kmem_cache_free(abd_cache, abd);
    ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
#ifdef _KERNEL
static unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;

/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps.
 */
#ifdef _LP64
#define ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
    get_page(page);
    SetPagePrivate(page);
    set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
    set_page_private(page, 0UL);
    ClearPagePrivate(page);
    put_page(page);
}
#else
#define abd_mark_zfs_page(page)
#define abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define __GFP_RECLAIM	__GFP_WAIT
#endif
/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone.  Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction.  When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
    struct list_head pages;
    struct sg_table table;
    struct scatterlist *sg;
    struct page *page, *tmp_page = NULL;
    gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
    gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
    unsigned int max_order = MIN(zfs_abd_scatter_max_order,
        ABD_MAX_ORDER - 1);
    unsigned int nr_pages = abd_chunkcnt_for_bytes(size);
    unsigned int chunks = 0, zones = 0;
    size_t remaining_size;
    int nid = NUMA_NO_NODE;
    unsigned int alloc_pages = 0;

    INIT_LIST_HEAD(&pages);

    ASSERT3U(alloc_pages, <, nr_pages);

    while (alloc_pages < nr_pages) {
        unsigned int chunk_pages;
        unsigned int order;

        order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
        chunk_pages = (1U << order);

        page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
        if (page == NULL) {
            if (order == 0) {
                ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
                schedule_timeout_interruptible(1);
            } else {
                max_order = MAX(0, order - 1);
            }
            continue;
        }

        list_add_tail(&page->lru, &pages);

        if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
            zones++;

        nid = page_to_nid(page);
        ABDSTAT_BUMP(abdstat_scatter_orders[order]);

        chunks++;
        alloc_pages += chunk_pages;
    }

    ASSERT3S(alloc_pages, ==, nr_pages);

    while (sg_alloc_table(&table, chunks, gfp)) {
        ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
        schedule_timeout_interruptible(1);
    }

    sg = table.sgl;
    remaining_size = size;
    list_for_each_entry_safe(page, tmp_page, &pages, lru) {
        size_t sg_size = MIN(PAGESIZE << compound_order(page),
            remaining_size);
        sg_set_page(sg, page, sg_size, 0);
        abd_mark_zfs_page(page);
        remaining_size -= sg_size;

        sg = sg_next(sg);
        list_del(&page->lru);
    }

    /*
     * These conditions ensure that a possible transformation to a linear
     * ABD would be valid.
     */
    ASSERT(!PageHighMem(sg_page(table.sgl)));
    ASSERT0(ABD_SCATTER(abd).abd_offset);

    if (table.nents == 1) {
        /*
         * Since there is only one entry, this ABD can be represented
         * as a linear buffer.  All single-page (4K) ABD's can be
         * represented this way.  Some multi-page ABD's can also be
         * represented this way, if we were able to allocate a single
         * "chunk" (higher-order "page" which represents a power-of-2
         * series of physically-contiguous pages).  This is often the
         * case for 2-page (8K) ABD's.
         *
         * Representing a single-entry scatter ABD as a linear ABD
         * has the performance advantage of avoiding the copy (and
         * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
         * A performance increase of around 5% has been observed for
         * ARC-cached reads (of small blocks which can take advantage
         * of this).
         *
         * Note that this optimization is only possible because the
         * pages are always mapped into the kernel's address space.
         * This is not the case for highmem pages, so the
         * optimization can not be made there.
         */
        abd->abd_flags |= ABD_FLAG_LINEAR;
        abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
        abd->abd_u.abd_linear.abd_sgl = table.sgl;
        ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
    } else if (table.nents > 1) {
        ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
        abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

        if (zones) {
            ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
            abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
        }

        ABD_SCATTER(abd).abd_sgl = table.sgl;
        ABD_SCATTER(abd).abd_nents = table.nents;
    }
}
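/*
 * Worked example of the allocation loop above (illustrative only): for a
 * 128K ABD on a 4K-page system, nr_pages is 32, so the first pass asks for a
 * single order-5 (32-page) compound chunk without triggering reclaim.  If
 * that fails, max_order is lowered and the loop retries with smaller orders
 * (16-page, 8-page, ... chunks), in the worst case degenerating to 32
 * individual order-0 pages, each of which may sleep briefly and retry until
 * the allocation succeeds.
 */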
#else

/*
 * Allocate N individual pages to construct a scatter ABD.  This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces.  It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
    struct scatterlist *sg = NULL;
    struct sg_table table;
    struct page *page;
    gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
    int nr_pages = abd_chunkcnt_for_bytes(size);
    int i = 0;

    while (sg_alloc_table(&table, nr_pages, gfp)) {
        ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
        schedule_timeout_interruptible(1);
    }

    ASSERT3U(table.nents, ==, nr_pages);
    ABD_SCATTER(abd).abd_sgl = table.sgl;
    ABD_SCATTER(abd).abd_nents = nr_pages;

    abd_for_each_sg(abd, sg, nr_pages, i) {
        while ((page = __page_cache_alloc(gfp)) == NULL) {
            ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
            schedule_timeout_interruptible(1);
        }

        ABDSTAT_BUMP(abdstat_scatter_orders[0]);
        sg_set_page(sg, page, PAGESIZE, 0);
        abd_mark_zfs_page(page);
    }

    ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
    abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
}
#endif /* !CONFIG_HIGHMEM */
/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
    struct sg_table table;

    table.sgl = ABD_SCATTER(abd).abd_sgl;
    table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
    sg_free_table(&table);
}
void
abd_free_chunks(abd_t *abd)
{
    struct scatterlist *sg = NULL;
    struct page *page;
    int nr_pages = ABD_SCATTER(abd).abd_nents;
    int order, i = 0;

    if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
        ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

    if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
        ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

    abd_for_each_sg(abd, sg, nr_pages, i) {
        page = sg_page(sg);
        abd_unmark_zfs_page(page);
        order = compound_order(page);
        __free_pages(page, order);
        ASSERT3U(sg->length, <=, PAGE_SIZE << order);
        ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
    }
    abd_free_sg_table(abd);
}
/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
    struct scatterlist *sg = NULL;
    struct sg_table table;
    gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
    int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
    int i = 0;

#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
    gfp_t gfp_zero_page = gfp | __GFP_ZERO;
    while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
        ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
        schedule_timeout_interruptible(1);
    }
    abd_mark_zfs_page(abd_zero_page);
#else
    abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */

    while (sg_alloc_table(&table, nr_pages, gfp)) {
        ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
        schedule_timeout_interruptible(1);
    }
    ASSERT3U(table.nents, ==, nr_pages);

    abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
    abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
    ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
    ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
    ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
    abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
    abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

    abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
        sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
    }

    ABDSTAT_BUMP(abdstat_scatter_cnt);
    ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
    ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
#else /* _KERNEL */

#define PAGE_SHIFT	(highbit64(PAGESIZE)-1)

#define zfs_kmap_atomic(chunk)		((void *)chunk)
#define zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define local_irq_save(flags)		do { (void)(flags); } while (0)
#define local_irq_restore(flags)	do { (void)(flags); } while (0)
#define nth_page(pg, i) \
    ((struct page *)((void *)(pg) + (i) * PAGESIZE))
struct scatterlist {
    struct page *page;
    int length;
    int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
    memset(sg, 0, nr * sizeof (struct scatterlist));
    sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
    int nents = ABD_SCATTER(abd).abd_nents;
    vmem_free(ABD_SCATTER(abd).abd_sgl,
        nents * sizeof (struct scatterlist));
}

#define for_each_sg(sgl, sg, nr, i)	\
    for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
    /* currently we don't use offset */
    ASSERT(offset == 0);
    sg->page = page;
    sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
    return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
    if (sg->end)
        return (NULL);

    return (sg + 1);
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
    unsigned nr_pages = abd_chunkcnt_for_bytes(size);
    struct scatterlist *sg;
    int i;

    ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
        sizeof (struct scatterlist), KM_SLEEP);
    sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

    abd_for_each_sg(abd, sg, nr_pages, i) {
        struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
        sg_set_page(sg, p, PAGESIZE, 0);
    }
    ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
    int i, n = ABD_SCATTER(abd).abd_nents;
    struct scatterlist *sg;

    abd_for_each_sg(abd, sg, n, i) {
        struct page *p = nth_page(sg_page(sg), 0);
        umem_free_aligned(p, PAGESIZE);
    }
    abd_free_sg_table(abd);
}
static void
abd_alloc_zero_scatter(void)
{
    unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
    struct scatterlist *sg;
    int i;

    abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
    memset(abd_zero_page, 0, PAGESIZE);
    abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
    abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
    abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
    ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
    ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
    abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
    ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
        sizeof (struct scatterlist), KM_SLEEP);

    sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

    abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
        sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
    }

    ABDSTAT_BUMP(abdstat_scatter_cnt);
    ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
    ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */
boolean_t
abd_size_alloc_linear(size_t size)
{
    return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
    ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
    int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
    if (op == ABDSTAT_INCR) {
        ABDSTAT_BUMP(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
        ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
        arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
    } else {
        ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
        ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
        ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
        arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
    }
}
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
    ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
    if (op == ABDSTAT_INCR) {
        ABDSTAT_BUMP(abdstat_linear_cnt);
        ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
    } else {
        ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
        ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
    }
}
void
abd_verify_scatter(abd_t *abd)
{
    size_t n;
    int i = 0;
    struct scatterlist *sg = NULL;

    ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
    ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
        ABD_SCATTER(abd).abd_sgl->length);
    n = ABD_SCATTER(abd).abd_nents;
    abd_for_each_sg(abd, sg, n, i) {
        ASSERT3P(sg_page(sg), !=, NULL);
    }
}
static void
abd_free_zero_scatter(void)
{
    ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
    ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
    ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

    abd_free_sg_table(abd_zero_scatter);
    abd_free_struct(abd_zero_scatter);
    abd_zero_scatter = NULL;
    ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
    abd_unmark_zfs_page(abd_zero_page);
    __free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
    umem_free_aligned(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
    abd_stats_t *as = ksp->ks_data;

    if (rw == KSTAT_WRITE)
        return (EACCES);
    as->abdstat_struct_size.value.ui64 =
        wmsum_value(&abd_sums.abdstat_struct_size);
    as->abdstat_linear_cnt.value.ui64 =
        wmsum_value(&abd_sums.abdstat_linear_cnt);
    as->abdstat_linear_data_size.value.ui64 =
        wmsum_value(&abd_sums.abdstat_linear_data_size);
    as->abdstat_scatter_cnt.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_cnt);
    as->abdstat_scatter_data_size.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_data_size);
    as->abdstat_scatter_chunk_waste.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
    for (int i = 0; i < ABD_MAX_ORDER; i++) {
        as->abdstat_scatter_orders[i].value.ui64 =
            wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
    }
    as->abdstat_scatter_page_multi_chunk.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
    as->abdstat_scatter_page_multi_zone.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
    as->abdstat_scatter_page_alloc_retry.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
    as->abdstat_scatter_sg_table_retry.value.ui64 =
        wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
    return (0);
}
void
abd_init(void)
{
    int i;

    abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
        0, NULL, NULL, NULL, NULL, NULL, 0);

    wmsum_init(&abd_sums.abdstat_struct_size, 0);
    wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
    wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
    wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
    wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
    wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
    for (i = 0; i < ABD_MAX_ORDER; i++)
        wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
    wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
    wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
    wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
    wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

    abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
        sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
    if (abd_ksp != NULL) {
        for (i = 0; i < ABD_MAX_ORDER; i++) {
            snprintf(abd_stats.abdstat_scatter_orders[i].name,
                KSTAT_STRLEN, "scatter_order_%d", i);
            abd_stats.abdstat_scatter_orders[i].data_type =
                KSTAT_DATA_UINT64;
        }
        abd_ksp->ks_data = &abd_stats;
        abd_ksp->ks_update = abd_kstats_update;
        kstat_install(abd_ksp);
    }

    abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
    abd_free_zero_scatter();

    if (abd_ksp != NULL) {
        kstat_delete(abd_ksp);
        abd_ksp = NULL;
    }

    wmsum_fini(&abd_sums.abdstat_struct_size);
    wmsum_fini(&abd_sums.abdstat_linear_cnt);
    wmsum_fini(&abd_sums.abdstat_linear_data_size);
    wmsum_fini(&abd_sums.abdstat_scatter_cnt);
    wmsum_fini(&abd_sums.abdstat_scatter_data_size);
    wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
    for (int i = 0; i < ABD_MAX_ORDER; i++)
        wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
    wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
    wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
    wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
    wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

    if (abd_cache) {
        kmem_cache_destroy(abd_cache);
        abd_cache = NULL;
    }
}
void
abd_free_linear_page(abd_t *abd)
{
    /* Transform it back into a scatter ABD for freeing */
    struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
    abd->abd_flags &= ~ABD_FLAG_LINEAR;
    abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
    ABD_SCATTER(abd).abd_nents = 1;
    ABD_SCATTER(abd).abd_offset = 0;
    ABD_SCATTER(abd).abd_sgl = sg;
    abd_free_chunks(abd);

    abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy.  But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
    return (abd_alloc(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
    (void) size;
    int i = 0;
    struct scatterlist *sg = NULL;

    abd_verify(sabd);
    ASSERT3U(off, <=, sabd->abd_size);

    size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

    if (abd == NULL)
        abd = abd_alloc_struct(0);

    /*
     * Even if this buf is filesystem metadata, we only track that
     * if we own the underlying data buffer, which is not true in
     * this case.  Therefore, we don't ever use ABD_FLAG_META here.
     */

    abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
        if (new_offset < sg->length)
            break;
        new_offset -= sg->length;
    }

    ABD_SCATTER(abd).abd_sgl = sg;
    ABD_SCATTER(abd).abd_offset = new_offset;
    ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

    return (abd);
}
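/*
 * Illustrative example (not in the original source): for a source ABD whose
 * scatterlist entries are each 4096 bytes long and whose abd_offset is 0, a
 * request for offset 9000 walks past the first two entries
 * (9000 - 4096 - 4096 = 808) and hands back an ABD that starts at the third
 * entry with abd_offset = 808 and abd_nents reduced by 2.
 */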
/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
    ASSERT(!abd_is_gang(abd));
    abd_verify(abd);
    aiter->iter_abd = abd;
    aiter->iter_mapaddr = NULL;
    aiter->iter_mapsize = 0;
    aiter->iter_pos = 0;
    if (abd_is_linear(abd)) {
        aiter->iter_offset = 0;
        aiter->iter_sg = NULL;
    } else {
        aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
        aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
    }
}
/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
    return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
 * Advance the iterator by a certain amount.  Cannot be called when a chunk is
 * in use.  This can be safely called when the aiter has already been
 * exhausted, in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
    ASSERT3P(aiter->iter_mapaddr, ==, NULL);
    ASSERT0(aiter->iter_mapsize);

    /* There's nothing left to advance to, so do nothing */
    if (abd_iter_at_end(aiter))
        return;

    aiter->iter_pos += amount;
    aiter->iter_offset += amount;
    if (!abd_is_linear(aiter->iter_abd)) {
        while (aiter->iter_offset >= aiter->iter_sg->length) {
            aiter->iter_offset -= aiter->iter_sg->length;
            aiter->iter_sg = sg_next(aiter->iter_sg);
            if (aiter->iter_sg == NULL) {
                ASSERT0(aiter->iter_offset);
                break;
            }
        }
    }
}
/*
 * Map the current chunk into aiter.  This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
    void *paddr;
    size_t offset = 0;

    ASSERT3P(aiter->iter_mapaddr, ==, NULL);
    ASSERT0(aiter->iter_mapsize);

    /* There's nothing left to iterate over, so do nothing */
    if (abd_iter_at_end(aiter))
        return;

    if (abd_is_linear(aiter->iter_abd)) {
        ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
        offset = aiter->iter_offset;
        aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
        paddr = ABD_LINEAR_BUF(aiter->iter_abd);
    } else {
        offset = aiter->iter_offset;
        aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
            aiter->iter_abd->abd_size - aiter->iter_pos);

        paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
    }

    aiter->iter_mapaddr = (char *)paddr + offset;
}
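/*
 * Illustrative example (not in the original source): iterating a 6000-byte
 * scatter ABD built from 4096-byte entries, the first abd_iter_map() yields
 * mapsize = MIN(4096 - 0, 6000 - 0) = 4096; after advancing by 4096, the
 * second map yields MIN(4096 - 0, 6000 - 4096) = 1904, the tail of the
 * buffer in the second page.
 */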
/*
 * Unmap the current chunk from aiter.  This can be safely called when the
 * aiter has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
    /* There's nothing left to unmap, so do nothing */
    if (abd_iter_at_end(aiter))
        return;

    if (!abd_is_linear(aiter->iter_abd)) {
        /* LINTED E_FUNC_SET_NOT_USED */
        zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
    }

    ASSERT3P(aiter->iter_mapaddr, !=, NULL);
    ASSERT3U(aiter->iter_mapsize, >, 0);

    aiter->iter_mapaddr = NULL;
    aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
    unsigned long pos;

    if (abd_is_gang(abd)) {
        unsigned long count = 0;

        for (abd_t *cabd = abd_gang_get_offset(abd, &off);
            cabd != NULL && size != 0;
            cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
            ASSERT3U(off, <, cabd->abd_size);
            int mysize = MIN(size, cabd->abd_size - off);
            count += abd_nr_pages_off(cabd, mysize, off);
            size -= mysize;
            off = 0;
        }
        return (count);
    }

    if (abd_is_linear(abd))
        pos = (unsigned long)abd_to_buf(abd) + off;
    else
        pos = ABD_SCATTER(abd).abd_offset + off;

    return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
        (pos >> PAGE_SHIFT));
}
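/*
 * Worked example (illustrative only): with PAGESIZE = 4096, a scatter ABD
 * with abd_offset = 1024 and an 8192-byte request starting at off = 0 gives
 * pos = 1024, so ((1024 + 8192 + 4095) >> 12) - (1024 >> 12) = 3 - 0 = 3
 * bio pages, since the byte range straddles three pages.
 */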
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
    unsigned int offset, size, i;
    struct page *page;

    offset = offset_in_page(buf_ptr);
    for (i = 0; i < bio->bi_max_vecs; i++) {
        size = PAGE_SIZE - offset;

        if (bio_size <= 0)
            break;

        if (size > bio_size)
            size = bio_size;

        if (is_vmalloc_addr(buf_ptr))
            page = vmalloc_to_page(buf_ptr);
        else
            page = virt_to_page(buf_ptr);

        /*
         * Some network-related block devices use tcp_sendpage, which
         * doesn't behave well when given a 0-count page; this is a
         * safety net to catch them.
         */
        ASSERT3S(page_count(page), >, 0);

        if (bio_add_page(bio, page, size, offset) != size)
            break;

        buf_ptr += size;
        bio_size -= size;
        offset = 0;
    }

    return (bio_size);
}
/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
    ASSERT(abd_is_gang(abd));

    for (abd_t *cabd = abd_gang_get_offset(abd, &off);
        cabd != NULL;
        cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
        ASSERT3U(off, <, cabd->abd_size);
        int size = MIN(io_size, cabd->abd_size - off);
        int remainder = abd_bio_map_off(bio, cabd, size, off);
        io_size -= (size - remainder);
        if (io_size == 0 || remainder > 0)
            return (io_size);
        off = 0;
    }
    ASSERT0(io_size);
    return (0);
}
/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
    struct abd_iter aiter;

    ASSERT3U(io_size, <=, abd->abd_size - off);
    if (abd_is_linear(abd))
        return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

    ASSERT(!abd_is_linear(abd));
    if (abd_is_gang(abd))
        return (abd_gang_bio_map_off(bio, abd, io_size, off));

    abd_iter_init(&aiter, abd);
    abd_iter_advance(&aiter, off);

    for (int i = 0; i < bio->bi_max_vecs; i++) {
        struct page *pg;
        size_t len, sgoff, pgoff;
        struct scatterlist *sg;

        if (io_size <= 0)
            break;

        sg = aiter.iter_sg;
        sgoff = aiter.iter_offset;
        pgoff = sgoff & (PAGESIZE - 1);
        len = MIN(io_size, PAGESIZE - pgoff);
        ASSERT(len > 0);

        pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
        if (bio_add_page(bio, pg, len, pgoff) != len)
            break;

        io_size -= len;
        abd_iter_advance(&aiter, len);
    }

    return (io_size);
}
/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
    "Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
    "Minimum size of scatter allocations.");
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
    "Maximum order allocation used for a scatter ABD.");

#endif /* _KERNEL */