/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
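
/*
 * Example (illustrative sketch, not part of this file): walking every
 * entry of a possibly chained list with sg_next(). Most callers use the
 * for_each_sg() helper from <linux/scatterlist.h>, which wraps exactly
 * this loop.
 *
 *	struct scatterlist *sg;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		pr_debug("entry of %u bytes\n", sg->length);
 */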

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;

        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
        struct scatterlist *ret = &sgl[nents - 1];
#else
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
#endif
        return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
        {
                unsigned int i;
                for (i = 0; i < nents; i++)
                        sgl[i].sg_magic = SG_MAGIC;
        }
#endif
        sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
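
/*
 * Example (illustrative sketch, not part of this file; error handling
 * omitted): describing one kmalloc'ed buffer to an API that consumes a
 * scatterlist. The 512-byte size is an arbitrary assumption.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(512, GFP_KERNEL);
 *
 *	sg_init_one(&sg, buf, 512);
 *	... hand &sg with nents == 1 to the consuming API ...
 */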

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     bool skip_first_chunk, sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > max_ents) {
                        next = sg_chain_ptr(&sgl[max_ents - 1]);
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                if (skip_first_chunk)
                        skip_first_chunk = false;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first scatterlist chunk preallocated by the caller, or NULL
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > max_ents) {
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage. Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
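
/*
 * Example (illustrative sketch, not part of this file): allocating a
 * table, pointing each entry at one page of a caller-provided pages[]
 * array (an assumption of this sketch), then releasing it.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	... perform the transfer ...
 *	sg_free_table(&table);
 */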

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
                              struct page **pages, unsigned int n_pages,
                              unsigned long offset, unsigned long size,
                              gfp_t gfp_mask)
{
        unsigned int chunks;
        unsigned int i;
        unsigned int cur_page;
        int ret;
        struct scatterlist *s;

        /* compute number of contiguous chunks */
        chunks = 1;
        for (i = 1; i < n_pages; ++i)
                if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                        ++chunks;

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned long chunk_size;
                unsigned int j;

                /* look for the end of the current chunk */
                for (j = cur_page + 1; j < n_pages; ++j)
                        if (page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
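
/*
 * Example (illustrative sketch, not part of this file): building a
 * table over pages pinned elsewhere, e.g. by get_user_pages(); the
 * pages/n_pages/offset/size names are assumptions of this sketch.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages,
 *					offset, size, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	... use sgt.sgl / sgt.nents ...
 *	sg_free_table(&sgt);
 */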

void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
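
/*
 * Example (illustrative sketch, not part of this file): the two
 * functions above back the for_each_sg_page() helper in
 * <linux/scatterlist.h>, which visits each page of a list:
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		... operate on page ...
 *	}
 */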

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags controlling the iteration
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;
                unsigned long pgoffset;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;
                pgoffset = miter->piter.sg_pgoffset;

                miter->__offset = pgoffset ? 0 : sg->offset;
                miter->__remaining = sg->offset + sg->length -
                                (pgoffset << PAGE_SHIFT) - miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if ((miter->__flags & SG_MITER_TO_SG) &&
                    !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);
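
/*
 * Example (illustrative sketch, not part of this file): the usual
 * start/next/stop pattern, here zeroing all data described by a list
 * from atomic context.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */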

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: The linear buffer to copy from or to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, off_t skip,
                             bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned long flags;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        local_irq_save(flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        local_irq_restore(flags);
        return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
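
/*
 * Example (illustrative sketch, not part of this file): staging a
 * command into an sg list and reading the reply back out; cmd/reply
 * and their sizes are assumptions of this sketch.
 *
 *	char cmd[64], reply[64];
 *
 *	sg_copy_from_buffer(sgl, nents, cmd, sizeof(cmd));
 *	... let the device process the list ...
 *	sg_copy_to_buffer(sgl, nents, reply, sizeof(reply));
 */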

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);