/* lib/iov_iter.c */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

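/*
 * The iterate_* macros below walk one segment type each (user iovecs,
 * kernel kvecs, bio_vecs) and run STEP on every contiguous chunk; for
 * the iovec flavour STEP evaluates to the number of bytes it failed to
 * process (so a short copy to userspace stops the walk).  A caller-side
 * sketch, with hypothetical per-chunk helpers standing in for real
 * STEP bodies:
 *
 *	iterate_all_kinds(i, n, v,
 *		step_user(v.iov_base, v.iov_len),		// ITER_IOVEC
 *		step_page(v.bv_page, v.bv_offset, v.bv_len),	// ITER_BVEC
 *		step_kernel(v.iov_base, v.iov_len))		// ITER_KVEC
 */
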
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

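/*
 * iterate_all_kinds() only walks the data; iterate_and_advance() also
 * consumes it, updating i->count, i->iov_offset and the segment
 * pointers.  The simplest real user is iov_iter_zero() further down,
 * which passes one STEP expression per flavour:
 *
 *	iterate_and_advance(i, bytes, v,
 *		__clear_user(v.iov_base, v.iov_len),
 *		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 *		memset(v.iov_base, 0, v.iov_len)
 *	)
 */
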
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

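/*
 * Usage sketch (illustrative, loosely modelled on the buffered-write
 * path in generic_perform_write(); not code from this file): fault the
 * user pages in *before* taking page locks, then do the atomic copy and
 * retry on a short copy:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 *	// ...lock and map the page...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	// ...unlock; if copied < bytes, shrink bytes and loop again...
 */
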
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

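/*
 * Construction sketch (illustrative only): wrap two user buffers in an
 * iterator and fill them from a kernel buffer.  ubuf0/ubuf1/kbuf are
 * hypothetical; READ means data flows *into* the iovecs:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = ubuf0, .iov_len = len0 },
 *		{ .iov_base = ubuf1, .iov_len = len1 },
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, iov, 2, len0 + len1);
 *	copied = copy_to_iter(kbuf, len0 + len1, &iter);
 */
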
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

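/*
 * Grow the pipe so that up to @size bytes can be written: top up a
 * partially filled tail buffer first, then allocate fresh pages until
 * the ring is full or @size is covered.  On return *idxp/*offp point at
 * the first byte of fresh space; the return value is how many bytes
 * actually fit, which can be short if alloc_page() fails or the ring
 * fills up.
 */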
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

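/*
 * Caller's-eye sketch (illustrative only; error conventions vary by
 * caller): copy_to_iter() returns the number of bytes actually copied,
 * which may be short if a user page faults:
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *	if (copied != len)
 *		return copied ? copied : -EFAULT;
 */
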
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

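/*
 * Construction sketch (illustrative only): wrap a kernel buffer for a
 * write-style operation; kbuf/len are hypothetical.  Note that callers
 * OR the flavour bit into the direction themselves:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE | ITER_KVEC, &kv, 1, len);
 */
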
void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

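/*
 * Note on the return value: iov_iter_alignment() ORs together every
 * segment's address and length, so the low bits encode the worst-case
 * misalignment across the whole iterator.  A direct-I/O style check
 * (sketch; blocksize is a hypothetical power of two) would be:
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		return -EINVAL;
 */
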
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

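/*
 * Usage sketch (illustrative, direct-I/O flavoured): pin the next chunk
 * of the iterator's memory; every page returned holds a reference that
 * the caller must eventually drop with put_page():
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n = iov_iter_get_pages(iter, pages, bytes, 16, &off);
 *
 *	if (n < 0)
 *		return n;
 *	// n bytes of data start at offset off in pages[0]
 */
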
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

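/*
 * Usage sketch (illustrative, modelled on a readv()-style path): let
 * import_iovec() validate the user array and choose between the caller's
 * on-stack iovecs and a heap copy, then always kfree() the out-pointer:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV,
 *			       &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ...use &iter...
 *	kfree(iov);	// safe: NULL when the stack array was used
 */
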
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
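
/*
 * Usage sketch (illustrative, read()/write()-style single buffer); the
 * iovec must live at least as long as the iterator that wraps it:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *
 *	if (import_single_range(WRITE, ubuf, len, &iov, &iter))
 *		return -EFAULT;
 *	// ...hand &iter to a ->write_iter()-style consumer...
 */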