/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
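
/*
 * Example (illustrative sketch, not part of the kernel sources): walking a
 * possibly chained list entry by entry. The for_each_sg() helper from
 * <linux/scatterlist.h> wraps exactly this sg_next() pattern; `sgl` and
 * `nents` are assumed to come from the caller.
 *
 *      struct scatterlist *sg;
 *      unsigned int i;
 *
 *      for_each_sg(sgl, sg, nents, i)
 *              pr_debug("entry %u: %u bytes\n", i, sg->length);
 */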

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

#ifdef CONFIG_DEBUG_SG
        BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
#endif
        return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
        {
                unsigned int i;
                for (i = 0; i < nents; i++)
                        sgl[i].sg_magic = SG_MAGIC;
        }
#endif
        sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
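
/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * caller-owned, statically sized table of two entries pointing at two
 * hypothetical pages `page_a` and `page_b`:
 *
 *      struct scatterlist sgl[2];
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_page(&sgl[0], page_a, PAGE_SIZE, 0);
 *      sg_set_page(&sgl[1], page_b, PAGE_SIZE, 0);
 */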

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
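
/*
 * Example (illustrative sketch): wrapping a kmalloc'ed buffer in a single
 * entry list, a common pattern for one-shot DMA or crypto requests. The
 * buffer must be linearly mapped (kmalloc memory, not vmalloc or, on
 * architectures with vmapped stacks, stack memory).
 *
 *      struct scatterlist sg;
 *      void *buf = kmalloc(len, GFP_KERNEL);
 *
 *      if (buf)
 *              sg_init_one(&sg, buf, len);
 */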

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously allocated sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     bool skip_first_chunk, sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > max_ents) {
                        next = sg_chain_ptr(&sgl[max_ents - 1]);
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                if (skip_first_chunk)
                        skip_first_chunk = false;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The sg table header to use
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL chunk preallocated by the caller, or NULL
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > max_ents) {
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage. Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
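
/*
 * Example (illustrative sketch): the usual allocate/fill/free lifecycle.
 * The page array `pages` and count `npages` are hypothetical
 * caller-supplied values; error handling is abbreviated.
 *
 *      struct sg_table table;
 *      struct scatterlist *sg;
 *      unsigned int i;
 *      int ret;
 *
 *      ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *      if (ret)
 *              return ret;
 *
 *      for_each_sg(table.sgl, sg, table.nents, i)
 *              sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *      ... map for DMA, perform I/O ...
 *
 *      sg_free_table(&table);
 */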

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                             an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset into the first page and a size of valid data in
 *   the buffer specified by the page array. The returned sg table is
 *   released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
                              struct page **pages, unsigned int n_pages,
                              unsigned long offset, unsigned long size,
                              gfp_t gfp_mask)
{
        unsigned int chunks;
        unsigned int i;
        unsigned int cur_page;
        int ret;
        struct scatterlist *s;

        /* compute number of contiguous chunks */
        chunks = 1;
        for (i = 1; i < n_pages; ++i)
                if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                        ++chunks;

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned long chunk_size;
                unsigned int j;

                /* look for the end of the current chunk */
                for (j = cur_page + 1; j < n_pages; ++j)
                        if (page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
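
/*
 * Example (illustrative sketch): building a table straight from pinned
 * user pages, a pattern common in DRM and V4L2 drivers. `pages`, `n`,
 * `off` and `len` are hypothetical caller-supplied values.
 *
 *      struct sg_table sgt;
 *      int ret;
 *
 *      ret = sg_alloc_table_from_pages(&sgt, pages, n, off, len,
 *                                      GFP_KERNEL);
 *      if (ret)
 *              return ret;
 *
 *      ... map for DMA, perform I/O ...
 *
 *      sg_free_table(&sgt);
 */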

void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
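
/*
 * Example (illustrative sketch): page-granular iteration via the
 * for_each_sg_page() helper from <linux/scatterlist.h>, which is built on
 * __sg_page_iter_start()/__sg_page_iter_next(). `sgl` and `nents` are
 * assumed caller-supplied.
 *
 *      struct sg_page_iter piter;
 *
 *      for_each_sg_page(sgl, &piter, nents, 0) {
 *              struct page *page = sg_page_iter_page(&piter);
 *
 *              ... operate on each page of the list ...
 *      }
 */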

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_ATOMIC, SG_MITER_TO_SG or
 *         SG_MITER_FROM_SG)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;
                unsigned long pgoffset;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;
                pgoffset = miter->piter.sg_pgoffset;

                miter->__offset = pgoffset ? 0 : sg->offset;
                miter->__remaining = sg->offset + sg->length -
                                (pgoffset << PAGE_SHIFT) - miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   until @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);
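
/*
 * Example (illustrative sketch): the canonical start/next/stop loop, here
 * reading from an sg list in atomic context. This is essentially what
 * sg_copy_buffer() below does; `sgl` and `nents` are assumed
 * caller-supplied, and consume() stands in for whatever per-chunk
 * processing the caller needs.
 *
 *      struct sg_mapping_iter miter;
 *
 *      sg_miter_start(&miter, sgl, nents,
 *                     SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *      while (sg_miter_next(&miter))
 *              consume(miter.addr, miter.length);
 *      sg_miter_stop(&miter);
 */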

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if ((miter->__flags & SG_MITER_TO_SG) &&
                    !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *             buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
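
/*
 * Example (illustrative sketch): draining an sg list into a hypothetical
 * response buffer `resp` of `resp_len` bytes, treating a short copy as an
 * error (caller policy, not required by the API).
 *
 *      size_t copied;
 *
 *      copied = sg_copy_to_buffer(sgl, nents, resp, resp_len);
 *      if (copied < resp_len)
 *              return -EIO;
 */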

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buflen: The number of bytes to zero out
 * @skip: Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                      size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);