/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
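
/*
 * Illustrative sketch (not part of the original file): walking a possibly
 * chained scatterlist with sg_next(). The helper name example_sg_total_len()
 * is hypothetical; it simply sums the lengths of all entries, relying on
 * sg_next() to return NULL at the end marker and to hop across chain
 * entries transparently.
 */
static unsigned int example_sg_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}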

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
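
/*
 * Illustrative sketch (not part of the original file): initializing an
 * on-stack scatterlist array. The function name and its buffer parameters
 * are hypothetical. sg_init_table() zeroes the array and marks the last
 * entry; sg_init_one() is the single-entry shorthand for the same thing.
 * Note the buffers themselves must be suitable for the intended use (e.g.
 * not stack memory if they will later be DMA-mapped).
 */
static void example_two_buffers(void *hdr, unsigned int hdr_len,
				void *payload, unsigned int payload_len)
{
	struct scatterlist sgl[2];

	sg_init_table(sgl, ARRAY_SIZE(sgl));
	sg_set_buf(&sgl[0], hdr, hdr_len);
	sg_set_buf(&sgl[1], payload, payload_len);

	/* single-entry equivalent: sg_init_one(&sgl[0], hdr, hdr_len); */
}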

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
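
/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/fill/free cycle around sg_alloc_table(). The function name and
 * the one-page-per-entry fill are hypothetical. Note that on failure
 * sg_alloc_table() has already freed any partial allocation itself, so only
 * the success path needs sg_free_table().
 */
static int example_alloc_cycle(struct page **pages, unsigned int n)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table.sgl / table.nents to a driver or DMA mapping ... */

	sg_free_table(&table);
	return 0;
}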

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in the
 *   buffer specified by the page array. The returned sg table is released
 *   by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
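
/*
 * Illustrative sketch (not part of the original file): building a table
 * straight from a page array and releasing it again. The function name, the
 * 128-byte starting offset, and the buffer length are hypothetical;
 * physically contiguous pages collapse into a single scatterlist entry.
 */
static int example_from_pages(struct page **pages, unsigned int n_pages,
			      unsigned long nbytes)
{
	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 128,
					nbytes, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use sgt.sgl / sgt.nents ... */

	sg_free_table(&sgt);
	return 0;
}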

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
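
/*
 * Illustrative sketch (not part of the original file): visiting every page
 * backing a scatterlist via the for_each_sg_page() iterator, which is built
 * on __sg_page_iter_start()/__sg_page_iter_next() above. The function name
 * and the counting done here are hypothetical.
 */
static unsigned int example_count_pages(struct scatterlist *sgl,
					unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* e.g. get_page(page), or add it to a bio */
		n++;
	}

	return n;
}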

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance the current location by
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
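
/*
 * Illustrative sketch (not part of the original file): the canonical mapping
 * iterator loop. The function name and the memset() body are hypothetical
 * stand-ins for real per-mapping work; with SG_MITER_ATOMIC the loop body
 * must not sleep between sg_miter_next() and sg_miter_stop().
 */
static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG | SG_MITER_ATOMIC);

	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);

	sg_miter_stop(&miter);
}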

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, off_t skip,
			     bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
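
/*
 * Illustrative sketch (not part of the original file): draining an sg list
 * into a linear bounce buffer with sg_copy_to_buffer(). The function name
 * and the short-copy check are hypothetical; the return value of the copy
 * helper is the number of bytes actually copied.
 */
static int example_drain_to_buffer(struct scatterlist *sgl, unsigned int nents,
				   void *buf, size_t buflen)
{
	size_t copied = sg_copy_to_buffer(sgl, nents, buf, buflen);

	return copied == buflen ? 0 : -EIO;
}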

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
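
/*
 * Illustrative sketch (not part of the original file): reading a small field
 * out of the middle of an sg list with sg_pcopy_to_buffer(). The function
 * name and the 16-byte header being skipped are hypothetical.
 */
static size_t example_peek_after_header(struct scatterlist *sgl,
					unsigned int nents,
					void *out, size_t len)
{
	return sg_pcopy_to_buffer(sgl, nents, out, len, 16);
}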