/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into
 * the kernel's virtual memory space, while scattered ABD data chunks are
 * allocated as physical pages and then mapped in only while they are actually
 * being accessed through one of the abd_* library functions. Using scattered
 * ABDs provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space. See abd_alloc_chunks() for details.
 */
53 | ||
54 | #include <sys/abd_impl.h> | |
55 | #include <sys/param.h> | |
56 | #include <sys/zio.h> | |
85ec5cba | 57 | #include <sys/arc.h> |
fc551d7e BA |
58 | #include <sys/zfs_context.h> |
59 | #include <sys/zfs_znode.h> | |
60 | #ifdef _KERNEL | |
61 | #include <linux/kmap_compat.h> | |
62 | #include <linux/scatterlist.h> | |
63 | #else | |
64 | #define MAX_ORDER 1 | |
65 | #endif | |
66 | ||
typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's. Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page). Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations. This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
static int zfs_abd_scatter_min_size = 512 * 3;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page. This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

struct page;
/*
 * abd_zero_page is an allocated zero'd PAGESIZE buffer, which is
 * assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}

abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	(void) size;
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}

#ifdef _KERNEL
static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
	put_page(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM	__GFP_WAIT
#endif

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone. Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction. When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer. All single-page (4K) ABD's can be
		 * represented this way. Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages). This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization can not be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}
#else

/*
 * Allocate N individual pages to construct a scatter ABD. This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces. It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */

/*
 * This must be called whenever any of the sg_table allocation functions
 * are used.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}

/*
 * Allocate a scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called whenever any of the sg_table allocation functions
 * are used.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		for (int j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			umem_free(p, PAGESIZE);
		}
	}
	abd_free_sg_table(abd);
}

static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	zfs_refcount_create(&abd_zero_scatter->abd_children);
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */

boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
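	/* Bytes lost to rounding the allocation up to whole pages. */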
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#else
	umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}

static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy. But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	(void) size;
	int i = 0;
	struct scatterlist *sg = NULL;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the iterator has already been
 * exhausted, in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the
 * iterator has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the
 * iterator has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}

static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well with 0-count pages; this is a safety
		 * net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}

/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif