#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

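/*
 * The iterate_* macros below walk an iov_iter one segment at a time,
 * exposing each chunk as __v and running STEP on it.  For user-space
 * iovecs, STEP returns the number of bytes it failed to copy, so a
 * fault stops the walk early; for bvecs the STEP is a plain memcpy and
 * cannot fail, so the whole range is always consumed.
 */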
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

#define iterate_all_kinds(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

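/*
 * Like iterate_all_kinds(), but also advance the iterator: trim i->count,
 * update i->iov_offset and step i->iov/i->bvec and i->nr_segs past any
 * fully consumed segments.
 */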
#define iterate_and_advance(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}

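/*
 * Copy between a kernel buffer and a user-backed (iovec) iterator.
 * Both helpers return the number of bytes actually copied, which may be
 * short if a segment faults part-way through.
 */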
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

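/*
 * Page <-> iovec copies first try the cheap path: pre-fault the user
 * buffer, then copy from a kmap_atomic() mapping with the *_inatomic
 * primitives.  If the atomic copy still comes up short, fall back to a
 * sleeping kmap() and the regular copy routines.
 */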
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __clear_user(buf, copy);
	copy -= left;
	skip += copy;
	bytes -= copy;

	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __clear_user(buf, copy);
		copy -= left;
		skip = copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
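/*
 * Typical use (sketch only, not taken from this file): write paths pair
 * this prefault with an atomic copy and retry on a short copy, e.g.
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 *
 * mirroring what generic_perform_write() does with these helpers.
 */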
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & ITER_BVEC)) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

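/*
 * Example of setting up an iterator over a single user buffer (sketch
 * only; ubuf, kbuf and len are illustrative, and the iovec would
 * normally come from the caller's request):
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */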
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

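/*
 * Pin the user pages backing the current iovec segment with
 * get_user_pages_fast() and report, via *start, the offset of the data
 * within the first page.  The return value is the number of bytes
 * covered by the pages that were actually pinned.
 */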
static ssize_t get_pages_iovec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}

static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}

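/*
 * Small kmap_atomic() helpers for copying to/from and zeroing a chunk
 * of a page; used by the bvec paths below, which deal in struct page
 * rather than user pointers.
 */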
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

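/*
 * The bvec variants mirror the iovec ones above but copy via the kernel
 * mapping of each page, so they cannot fault and always transfer the
 * full (clamped) byte count.
 */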
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;

	copy = min(bytes, bvec->bv_len - skip);

	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

	to += copy;
	skip += copy;
	bytes -= copy;

	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

static ssize_t get_pages_bvec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	/* can't be more than PAGE_SIZE */
	*start = bvec->bv_offset + i->iov_offset;

	get_page(*pages = bvec->bv_page);

	return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	*start = bvec->bv_offset + i->iov_offset;

	*pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
	if (!*pages)
		return -ENOMEM;

	get_page(**pages = bvec->bv_page);

	return len;
}

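/*
 * Exported entry points: each one simply dispatches on ITER_BVEC to the
 * iovec or bvec implementation above.
 */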
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_to_iter_bvec(addr, bytes, i);
	else
		return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_from_iter_bvec(addr, bytes, i);
	else
		return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC) {
		return zero_bvec(bytes, i);
	} else {
		return zero_iovec(bytes, i);
	}
}
EXPORT_SYMBOL(iov_iter_zero);

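/*
 * Copy from the iterator into a page that is currently mapped with
 * kmap_atomic().  Faults cannot be taken here, so the return value is
 * the number of bytes actually copied and may be short; the iterator
 * itself is not advanced, leaving the caller to prefault and retry.
 */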
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

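/*
 * OR together the start address (or page offset) and length of every
 * remaining segment.  A caller that needs, say, 512-byte alignment can
 * then just test (iov_iter_alignment(i) & 511): any misaligned base or
 * length leaves a low bit set.
 */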
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_bvec(i, pages, maxsize, maxpages, start);
	else
		return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_alloc_bvec(i, pages, maxsize, start);
	else
		return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

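/*
 * Count how many pages the remaining data spans, capped at maxpages.
 * For an iovec segment this is DIV_ROUND_UP(end, PAGE_SIZE) - start/PAGE_SIZE,
 * so e.g. a 100-byte segment straddling a page boundary counts as two pages;
 * each bvec segment contributes exactly one page.
 */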
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
		0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);