/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *   +-------------------+
 *   | ABD (linear)      |
 *   | abd_flags = ...   |
 *   | abd_size = ...    |     +------------------------------+
 *   | abd_buf --------------->| raw buffer of size abd_size  |
 *   +-------------------+     +------------------------------+
 *        no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *   +-------------------+
 *   | ABD (scattered)   |
 *   | abd_flags = ...   |
 *   | abd_size = ...    |
 *   | abd_offset = 0    |                      +-----------+
 *   | abd_chunks[0] -------------------------->|  chunk 0  |
 *   | abd_chunks[1] ------------------+        +-----------+
 *   | ...               |             |        +-----------+
 *   | abd_chunks[N-1] ---------+      +------->|  chunk 1  |
 *   +-------------------+      |               +-----------+
 *                              |                    ...
 *                              |               +-----------+
 *                              +-------------->| chunk N-1 |
 *                                               +-----------+
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we weren't using segkpm (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) Use of segkpm will avoid the need for map / unmap / TLB shootdown costs
 *     on each ABD access. (If segkpm isn't available then we use all linear
 *     ABDs to avoid this penalty.) See seg_kpm.c for more details.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
 * B_FALSE. However, it is not possible to use scattered ABDs if segkpm is not
 * available, which is the case on all 32-bit systems and any 64-bit systems
 * where kpm_enable is turned off.
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and the
 * original ABD's abd_children refcount is incremented. This data allows us to
 * ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly and you know it's linear
 * (because you allocated it), you can use abd_to_buf() to access the underlying
 * raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
 * which will allocate a raw buffer if necessary. Use the abd_return_buf*
 * functions to return any raw buffers that are no longer necessary when you're
 * done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 */
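
/*
 * Illustrative usage sketch (the 512-byte size and variable names below are
 * arbitrary, and error handling is elided). A consumer that does not care
 * about the underlying layout allocates an ABD, borrows a linear view only
 * for as long as it needs one, and returns the buffer before freeing:
 *
 *	abd_t *abd = abd_alloc(512, B_FALSE);       (scatter or linear)
 *	char *buf = abd_borrow_buf_copy(abd, 512);
 *	buf[0] ^= 0xff;                             (work on the raw buffer)
 *	abd_return_buf_copy(abd, buf, 512);         (copy changes back)
 *	abd_free(abd);
 *
 * Consumers that can work incrementally should instead use abd_iterate_func()
 * or the abd_copy*, abd_cmp* and abd_zero* helpers below, which map one chunk
 * at a time and never require a contiguous allocation.
 */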

#include <sys/abd.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/scatterlist.h>
#include <linux/kmap_compat.h>
#else
#define MAX_ORDER 1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};

#define ABDSTAT(stat)		(abd_stats.stat.value.ui64)
#define ABDSTAT_INCR(stat, val) \
	atomic_add_64(&abd_stats.stat.value.ui64, (val))
#define ABDSTAT_BUMP(stat)	ABDSTAT_INCR(stat, 1)
#define ABDSTAT_BUMPDOWN(stat)	ABDSTAT_INCR(stat, -1)

#define ABD_SCATTER(abd)	(abd->abd_u.abd_scatter)
#define ABD_BUF(abd)		(abd->abd_u.abd_linear.abd_buf)
#define abd_for_each_sg(abd, sg, n, i) \
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static inline size_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}

#ifdef _KERNEL
#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define __GFP_RECLAIM __GFP_WAIT
#endif

static unsigned long
abd_alloc_chunk(int nid, gfp_t gfp, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(nid, gfp, order);
	if (!page)
		return (0);

	return ((unsigned long) page_address(page));
}

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;
	int order;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned long paddr;
		unsigned chunk_pages;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		paddr = abd_alloc_chunk(nid, order ? gfp_comp : gfp, order);
		if (paddr == 0) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		page = virt_to_page(paddr);
		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	if (chunks > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}
	}

	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = table.nents;
}
#else
/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	struct scatterlist *sg;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */

static void
abd_free_pages(abd_t *abd)
{
	struct scatterlist *sg;
	struct sg_table table;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i, j;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		for (j = 0; j < sg->length; ) {
			page = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			order = compound_order(page);
			__free_pages(page, order);
			j += (PAGESIZE << order);
			ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
		}
	}

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = nr_pages;
	sg_free_table(&table);
}

#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

struct page;

#define kpm_enable 1
#define abd_alloc_chunk(o) \
	((struct page *) umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
#define abd_free_chunk(chunk, o) umem_free(chunk, PAGESIZE << (o))
#define zfs_kmap_atomic(chunk, km) ((void *)chunk)
#define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
#define local_irq_save(flags) do { (void)(flags); } while (0)
#define local_irq_restore(flags) do { (void)(flags); } while (0)
#define nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr) {
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

#define for_each_sg(sgl, sg, nr, i) \
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

static void
abd_alloc_pages(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = abd_alloc_chunk(0);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

static void
abd_free_pages(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;
	int j;

	abd_for_each_sg(abd, sg, n, i) {
		for (j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j>>PAGE_SHIFT);
			abd_free_chunk(p, 0);
		}
	}

	vmem_free(ABD_SCATTER(abd).abd_sgl, n * sizeof (struct scatterlist));
}

#endif /* _KERNEL */

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);

		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
	}
}

void
abd_fini(void)
{
	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

static inline void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
	} else {
		size_t n;
		int i;
		struct scatterlist *sg;

		ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
		ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
		    ABD_SCATTER(abd).abd_sgl->length);
		n = ABD_SCATTER(abd).abd_nents;
		abd_for_each_sg(abd, sg, n, i) {
			ASSERT3P(sg_page(sg), !=, NULL);
		}
	}
}

static inline abd_t *
abd_alloc_struct(void)
{
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);

	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

static inline void
abd_free_struct(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -sizeof (abd_t));
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	abd_t *abd;

	if (!zfs_abd_scatter_enabled || size <= PAGESIZE)
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd = abd_alloc_struct();
	abd->abd_flags = ABD_FLAG_OWNER;
	abd_alloc_pages(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_scatter.abd_offset = 0;

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    P2ROUNDUP(size, PAGESIZE) - size);

	return (abd);
}

static void
abd_free_scatter(abd_t *abd)
{
	abd_free_pages(abd);

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    abd->abd_size - P2ROUNDUP(abd->abd_size, PAGESIZE));

	abd_free_struct(abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	if (is_metadata) {
		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
	} else {
		abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, size);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	} else {
		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	}

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

	abd_free_struct(abd);
}

/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
 * abd_alloc_linear().
 */
void
abd_free(abd_t *abd)
{
	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else
		abd_free_scatter(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Illumos this is linear ABDs, however if ldi_strategy() can ever issue I/Os
 * using a scatter/gather list we should switch to that and replace this call
 * with vanilla abd_alloc().
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
static inline abd_t *
abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
{
	abd_t *abd;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		abd->abd_u.abd_linear.abd_buf =
		    (char *)sabd->abd_u.abd_linear.abd_buf + off;
	} else {
		int i;
		struct scatterlist *sg;
		size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;

		abd = abd_alloc_struct();

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = 0;

		abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
			if (new_offset < sg->length)
				break;
			new_offset -= sg->length;
		}

		ABD_SCATTER(abd).abd_sgl = sg;
		ABD_SCATTER(abd).abd_offset = new_offset;
		ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;
	}

	abd->abd_size = size;
	abd->abd_parent = sabd;
	refcount_create(&abd->abd_children);
	(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

	return (abd);
}

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;

	VERIFY3U(size, >, 0);

	return (abd_get_offset_impl(sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);

	return (abd_get_offset_impl(sabd, off, size));
}
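
/*
 * Illustrative sketch of the offset interface (names and sizes are arbitrary;
 * error handling is elided). The sub-ABD shares sabd's data, so it is
 * released with abd_put() rather than abd_free(), and sabd must stay
 * allocated for as long as the sub-ABD exists:
 *
 *	abd_t *sabd = abd_alloc(8192, B_FALSE);
 *	abd_t *tail = abd_get_offset(sabd, 4096);  (last 4096 bytes of sabd)
 *	abd_zero_off(tail, 0, 4096);               (zeroes sabd's second half)
 *	abd_put(tail);
 *	abd_free(sabd);
 */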

/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct();

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_linear.abd_buf = buf;

	return (abd);
}

/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (abd->abd_u.abd_linear.abd_buf);
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) refcount_add_many(&abd->abd_children, n, buf);

	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}

/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
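
/*
 * Illustrative sketch of the ownership interface (size and names are
 * arbitrary; error handling is elided). An ABD wrapped around an existing
 * buffer does not own it, but it can take ownership so that a single
 * abd_free() releases both the abd_t and the buffer:
 *
 *	void *buf = zio_data_buf_alloc(4096);
 *	abd_t *abd = abd_get_from_buf(buf, 4096);
 *	abd_take_ownership_of_buf(abd, B_FALSE);
 *	abd_free(abd);                             (also frees buf)
 */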

#ifndef HAVE_1ARG_KMAP_ATOMIC
#define NR_KM_TYPE (6)
#ifdef _KERNEL
int km_table[NR_KM_TYPE] = {
	KM_USER0,
	KM_USER1,
	KM_BIO_SRC_IRQ,
	KM_BIO_DST_IRQ,
	KM_PTE0,
	KM_PTE1,
};
#endif
#endif

struct abd_iter {
	/* public interface */
	void *iter_mapaddr;		/* addr corresponding to iter_pos */
	size_t iter_mapsize;		/* length of data valid at mapaddr */

	/* private */
	abd_t *iter_abd;		/* ABD being iterated through */
	size_t iter_pos;
	size_t iter_offset;		/* offset in current sg/abd_buf, */
					/* abd_offset included */
	struct scatterlist *iter_sg;	/* current sg */
#ifndef HAVE_1ARG_KMAP_ATOMIC
	int iter_km;			/* KM_* for kmap_atomic */
#endif
};

/*
 * Initialize the abd_iter.
 */
static void
abd_iter_init(struct abd_iter *aiter, abd_t *abd, int km_type)
{
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
#ifndef HAVE_1ARG_KMAP_ATOMIC
	ASSERT3U(km_type, <, NR_KM_TYPE);
	aiter->iter_km = km_type;
#endif
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been
 * exhausted, in which case this does nothing.
 */
static void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
static void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg),
		    km_table[aiter->iter_km]);
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
static void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset,
		    km_table[aiter->iter_km]);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	int ret = 0;
	struct abd_iter aiter;

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	while (size > 0) {
		size_t len;
		abd_iter_map(&aiter);

		len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (ret);
}

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	abd_iter_init(&daiter, dabd, 0);
	abd_iter_init(&saiter, sabd, 1);
	abd_iter_advance(&daiter, doff);
	abd_iter_advance(&saiter, soff);

	while (size > 0) {
		size_t dlen, slen, len;
		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		dlen = MIN(daiter.iter_mapsize, size);
		slen = MIN(saiter.iter_mapsize, size);
		len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&daiter, len);
		abd_iter_advance(&saiter, len);
	}

	return (ret);
}

/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}

/*
 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
 *
 * @cabds          parity ABDs, must have equal size
 * @dabd           data ABD. Can be NULL (in this case @dsize = 0)
 * @func_raidz_gen should be implemented so that its behaviour
 *                 is the same when taking linear and when taking scatter
 */
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
    ssize_t csize, ssize_t dsize, const unsigned parity,
    void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
	int i;
	ssize_t len, dlen;
	struct abd_iter caiters[3];
	struct abd_iter daiter;
	void *caddrs[3];
	unsigned long flags;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++)
		abd_iter_init(&caiters[i], cabds[i], i);

	if (dabd)
		abd_iter_init(&daiter, dabd, i);

	ASSERT3S(dsize, >=, 0);

	local_irq_save(flags);
	while (csize > 0) {
		len = csize;

		if (dabd && dsize > 0)
			abd_iter_map(&daiter);

		for (i = 0; i < parity; i++) {
			abd_iter_map(&caiters[i]);
			caddrs[i] = caiters[i].iter_mapaddr;
		}

		switch (parity) {
		case 3:
			len = MIN(caiters[2].iter_mapsize, len);
		case 2:
			len = MIN(caiters[1].iter_mapsize, len);
		case 1:
			len = MIN(caiters[0].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);

		if (dabd && dsize > 0) {
			/* this needs precise iter.length */
			len = MIN(daiter.iter_mapsize, len);
			dlen = len;
		} else
			dlen = 0;

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&caiters[i]);
			abd_iter_advance(&caiters[i], len);
		}

		if (dabd && dsize > 0) {
			abd_iter_unmap(&daiter);
			abd_iter_advance(&daiter, dlen);
			dsize -= dlen;
		}

		csize -= len;

		ASSERT3S(dsize, >=, 0);
		ASSERT3S(csize, >=, 0);
	}
	local_irq_restore(flags);
}

/*
 * Iterate over code ABDs and data reconstruction target ABDs and call
 * @func_raidz_rec. Function maps at most 6 pages atomically.
 *
 * @cabds          parity ABDs, must have equal size
 * @tabds          rec target ABDs, at most 3
 * @tsize          size of data target columns
 * @func_raidz_rec expects syndrome data in target columns. Function
 *                 reconstructs data and overwrites target columns.
 */
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
    ssize_t tsize, const unsigned parity,
    void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
    const unsigned *mul),
    const unsigned *mul)
{
	int i;
	ssize_t len;
	struct abd_iter citers[3];
	struct abd_iter xiters[3];
	void *caddrs[3], *xaddrs[3];
	unsigned long flags;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		abd_iter_init(&citers[i], cabds[i], 2*i);
		abd_iter_init(&xiters[i], tabds[i], 2*i+1);
	}

	local_irq_save(flags);
	while (tsize > 0) {

		for (i = 0; i < parity; i++) {
			abd_iter_map(&citers[i]);
			abd_iter_map(&xiters[i]);
			caddrs[i] = citers[i].iter_mapaddr;
			xaddrs[i] = xiters[i].iter_mapaddr;
		}

		len = tsize;
		switch (parity) {
		case 3:
			len = MIN(xiters[2].iter_mapsize, len);
			len = MIN(citers[2].iter_mapsize, len);
		case 2:
			len = MIN(xiters[1].iter_mapsize, len);
			len = MIN(citers[1].iter_mapsize, len);
		case 1:
			len = MIN(xiters[0].iter_mapsize, len);
			len = MIN(citers[0].iter_mapsize, len);
		}
		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_rec(xaddrs, len, caddrs, mul);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&xiters[i]);
			abd_iter_unmap(&citers[i]);
			abd_iter_advance(&xiters[i], len);
			abd_iter_advance(&citers[i], len);
		}

		tsize -= len;
		ASSERT3S(tsize, >=, 0);
	}
	local_irq_restore(flags);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = abd->abd_u.abd_scatter.abd_offset + off;

	return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT)
	    - (pos >> PAGE_SHIFT);
}

/*
 * bio_map for scatter ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	int i;
	struct abd_iter aiter;

	ASSERT(!abd_is_linear(abd));
	ASSERT3U(io_size, <=, abd->abd_size - off);

	abd_iter_init(&aiter, abd, 0);
	abd_iter_advance(&aiter, off);

	for (i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif