/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space. See abd_alloc_chunks() for details.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};

static struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

int zfs_abd_scatter_enabled = B_TRUE;
unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABDs. Smaller allocations will use linear ABDs, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABDs use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page). Using linear ABDs for small allocations means that
 * they will be put on slabs which contain many allocations. This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * SELinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
int zfs_abd_scatter_min_size = 512 * 3;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page. This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

/*
 * abd_zero_page is an allocated, zero'd PAGESIZE buffer, which is assigned
 * to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

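/*
 * Number of PAGESIZE chunks needed to hold an allocation of the given size,
 * rounded up to a whole page.
 */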
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}

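/*
 * Allocate a new abd_t from the abd_t kmem cache and account for it in the
 * abdstats kstat.
 */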
abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

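/*
 * Return an abd_t to the kmem cache and roll back the struct_size accounting.
 */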
void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}

#ifdef _KERNEL
/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);

		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer. All single-page (4K) ABD's can be
		 * represented this way. Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages). This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization can not be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}

#else

/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
	abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
}

#endif /* !CONFIG_HIGHMEM */

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

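/*
 * Free the pages backing a scatter ABD, update the kstat counters, and
 * release the sg_table that described them.
 */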
void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}

/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#else /* _KERNEL */

#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)

#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		for (int j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			umem_free(p, PAGESIZE);
		}
	}
	abd_free_sg_table(abd);
}

static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	zfs_refcount_create(&abd_zero_scatter->abd_children);
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */

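/*
 * Decide whether an allocation of this size should be satisfied with a
 * linear ABD rather than a scatter ABD (see zfs_abd_scatter_min_size above).
 */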
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
}

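/*
 * Update the scatter kstat counters (and the ARC chunk-waste accounting)
 * when a scatter ABD is allocated (ABDSTAT_INCR) or freed (ABDSTAT_DECR).
 */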
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

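/*
 * Update the linear kstat counters when a linear ABD is allocated or freed.
 */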
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

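/*
 * Sanity-check a scatter ABD: it must have at least one sg entry, its offset
 * must fall within the first entry, and every entry must map a page.
 */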
void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

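/*
 * Tear down the shared zero-filled scatter ABD and release the single zero
 * page that backs it.
 */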
static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#ifdef _KERNEL
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#else
	umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}

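/*
 * kstat update callback: publish the current wmsum counter values through
 * the abdstats kstat; writes are rejected.
 */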
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

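/*
 * Tear down the ABD subsystem: free abd_zero_scatter, remove the kstat, and
 * destroy the wmsum counters and the abd_t kmem cache.
 */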
void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

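/*
 * Free a "linear page" ABD (a scatter allocation that was mapped as linear
 * because it is backed by a single contiguous chunk; see abd_alloc_chunks()).
 */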
void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, so we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages, thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already exhausted, in
 * which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

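/*
 * Nothing to do here on Linux; the abd_t kmem cache is reclaimed by the
 * kernel as needed.
 */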
void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}

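/*
 * Fill @bio with pages backing the linear (virtual) buffer @buf_ptr, one
 * page per bio vector. The number of bytes that could not be added is
 * returned.
 */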
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well when using 0-count pages; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}

/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif /* _KERNEL */