/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
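
/*
 * Usage sketch (hypothetical caller, not part of this file): most code
 * should prefer the for_each_sg() helper, which is built on sg_next()
 * and walks chained tables transparently. 'sgl' and 'nents' are assumed
 * to come from the caller.
 *
 *	struct scatterlist *s;
 *	int i;
 *
 *	for_each_sg(sgl, s, nents, i)
 *		pr_debug("segment %d: %u bytes\n", i, s->length);
 */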

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents = 0;
	while (sg) {
		nents++;
		sg = sg_next(sg);
	}

	return nents;
}
EXPORT_SYMBOL(sg_nents);
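
/*
 * Usage sketch (hypothetical caller): recover the entry count when only
 * a scatterlist pointer was handed in; 'req->src' is a made-up field
 * standing in for any externally supplied list.
 *
 *	int nents = sg_nents(req->src);
 */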

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
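
/*
 * Usage sketch (hypothetical caller): 'sgl' and 'nents' are assumed
 * inputs. Since sg_last() may scan the whole list, cache the result
 * rather than calling it in a hot path.
 *
 *	struct scatterlist *end = sg_last(sgl, nents);
 *
 *	pr_debug("last segment holds %u bytes\n", end->length);
 */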

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
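
/*
 * Usage sketch (hypothetical caller): a small fixed-size list on the
 * stack, filled with sg_set_page(). The 'pages' array is assumed to
 * hold at least four page pointers.
 *
 *	struct scatterlist sgl[4];
 *	int i;
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	for (i = 0; i < ARRAY_SIZE(sgl); i++)
 *		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
 */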

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
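
/*
 * Usage sketch (hypothetical caller): wrap a kmalloc'ed buffer in a
 * one-entry list, e.g. for a DMA or crypto API. 'buf' and 'len' are
 * assumed; the buffer must be addressable via virt_to_page(), so no
 * vmalloc memory.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 */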

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
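
/*
 * Usage sketch (hypothetical caller): plugging in a custom allocator
 * pair. 'pool_alloc_sg' and 'pool_free_sg' are made-up names matching
 * the sg_alloc_fn and sg_free_fn typedefs; chunks are capped at
 * SG_MAX_SINGLE_ALLOC entries, as in sg_alloc_table() below.
 *
 *	ret = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC,
 *			       GFP_KERNEL, pool_alloc_sg);
 *	if (unlikely(ret))
 *		__sg_free_table(&table, SG_MAX_SINGLE_ALLOC, pool_free_sg);
 */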

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
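
/*
 * Usage sketch (hypothetical caller): the common allocate/fill/free
 * cycle. 'pages' and 'n' are assumed caller context.
 *
 *	struct sg_table table;
 *	struct scatterlist *s;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, n, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, s, table.nents, i)
 *		sg_set_page(s, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */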

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
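
/*
 * Usage sketch (hypothetical caller): build a table over an already
 * pinned buffer, e.g. pages obtained from get_user_pages(). 'pages',
 * 'n_pages', 'offset' and 'len' are assumed inputs.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages,
 *					offset, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */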

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping. @miter@ should have been
 *   started using sg_miter_start(). On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 *   @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary. __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
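
/*
 * Usage sketch (hypothetical caller): the start/next/stop cycle,
 * zeroing every byte covered by the list. 'sgl' and 'nents' are
 * assumed; SG_MITER_TO_SG marks the sg list as the destination.
 * Without SG_MITER_ATOMIC this may sleep, so call it from process
 * context.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 */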

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *	       buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
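
/*
 * Usage sketch (hypothetical caller): bounce a list's payload through
 * a linear staging buffer and back. 'sgl', 'nents' and 'len' are
 * assumed, with 'len' the total number of payload bytes.
 *
 *	void *tmp = kmalloc(len, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	sg_copy_to_buffer(sgl, nents, tmp, len);
 *	...
 *	sg_copy_from_buffer(sgl, nents, tmp, len);
 *	kfree(tmp);
 */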