// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {		\
	if (likely(n)) {				\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}					\
	}						\
}

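/*
 * Example (illustrative, not from the original file): callers hand
 * iterate_all_kinds() one step expression per segment type, and only the
 * one matching i->type is expanded.  A byte-counting walk could look like:
 *
 *	size_t seen = 0;
 *	iterate_all_kinds(i, n, v,
 *		({ seen += v.iov_len; 0; }),	// iovec step: 0 = nothing left over
 *		seen += v.bv_len,		// bvec page segment step
 *		seen += v.iov_len		// kvec step
 *	)
 */
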
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

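/*
 * Note: like the raw_copy_{to,from}_user() primitives they wrap,
 * copyout() and copyin() return the number of bytes that could *not* be
 * transferred, so zero means complete success.
 */
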
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

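/*
 * Example (illustrative; "ubuf" and "len" are placeholders): wrapping a
 * single user-space buffer as the destination of a read-style transfer.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	// kernel data can now be pushed out with copy_to_iter(kbuf, n, &iter)
 */
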
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

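/*
 * Note: most callers reach _copy_to_iter()/_copy_from_iter() through the
 * copy_to_iter()/copy_from_iter() inlines in <linux/uio.h>, which add a
 * check_copy_size() sanity check before calling these out-of-line helpers.
 */
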
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter().
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

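/*
 * Example (illustrative; "page", "offset", "bytes" and "iter" are
 * placeholders): a read path pushing page-cache data to the caller.
 *
 *	size_t copied = copy_page_to_iter(page, offset, bytes, iter);
 *
 * A short return means the iterator ran out of space or a user-space
 * destination faulted.
 */
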
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

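/*
 * Example (illustrative; "page" and "len" are placeholders): describing a
 * single kernel page as the source of a write-style transfer.
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_offset	= 0,
 *		.bv_len		= len,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, len);
 */
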
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (!next)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	const char *from = addr;
	__wsum sum, next;
	size_t off;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

	sum = csstate->csum;
	off = csstate->off;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

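/*
 * Note: struct csum_state (declared in <net/checksum.h>) carries the
 * running checksum and the byte offset reached so far, so a caller can
 * checksum one message across several csum_and_copy_to_iter() calls.
 */
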
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}". Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

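/*
 * Example (illustrative sketch of a readv()-style caller; "uvec" and
 * "nr_segs" come from user space):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... do the transfer against &iter ...
 *	kfree(iov);	// safe whether or not the stack array was used
 */
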
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

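/*
 * Note: unlike import_iovec(), import_single_range() silently caps the
 * length at MAX_RW_COUNT rather than failing, and never allocates, so no
 * kfree() is needed afterwards.
 */
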
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);