// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		(void)(STEP);				\
	}						\
}
#define iterate_xarray(i, n, __v, skip, STEP) {		\
	struct page *head = NULL;			\
	size_t wanted = n, seg, offset;			\
	loff_t start = i->xarray_start + skip;		\
	pgoff_t index = start >> PAGE_SHIFT;		\
	int j;						\
							\
	XA_STATE(xas, i->xarray, index);		\
							\
	rcu_read_lock();					\
	xas_for_each(&xas, head, ULONG_MAX) {			\
		if (xas_retry(&xas, head))			\
			continue;				\
		if (WARN_ON(xa_is_value(head)))			\
			break;					\
		if (WARN_ON(PageHuge(head)))			\
			break;					\
		for (j = (head->index < index) ? index - head->index : 0; \
		     j < thp_nr_pages(head); j++) {		\
			__v.bv_page = head + j;			\
			offset = (i->xarray_start + skip) & ~PAGE_MASK;	\
			seg = PAGE_SIZE - offset;		\
			__v.bv_offset = offset;			\
			__v.bv_len = min(n, seg);		\
			(void)(STEP);				\
			n -= __v.bv_len;			\
			skip += __v.bv_len;			\
			if (n == 0)				\
				break;				\
		}						\
		if (n == 0)					\
			break;					\
	}						\
	rcu_read_unlock();				\
	n = wanted - n;					\
}
#define iterate_all_kinds(i, n, v, I, B, K, X) {	\
	if (likely(n)) {				\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else if (unlikely(i->type & ITER_XARRAY)) {	\
			struct bio_vec v;			\
			iterate_xarray(i, n, v, skip, (X));	\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}					\
	}						\
}
#define iterate_and_advance(i, n, v, I, B, K, X) {	\
	if (unlikely(i->count < n))			\
		n = i->count;				\
	if (i->count) {					\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else if (unlikely(i->type & ITER_XARRAY)) {	\
			struct bio_vec v;			\
			iterate_xarray(i, n, v, skip, (X))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}
/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal, so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			rcu_read_unlock();
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
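/*
 * Usage sketch (illustrative, not part of this file): a dax/pmem-style read
 * path copies from a source that may contain poison and turns a short copy
 * into -EIO, as the comment above describes. kaddr and len are hypothetical.
 *
 *	size_t copied = _copy_mc_to_iter(kaddr, len, iter);
 *
 *	if (copied != len)	// a machine check truncated the transfer
 *		return copied ? copied : -EIO;
 */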
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	if (unlikely(iov_iter_is_xarray(i))) {
		size = min(size, i->count);
		i->iov_offset += size;
		i->count -= size;
		return;
	}
	if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
		return i->count;
	if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or by the caller locking
 * them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	i->type = ITER_XARRAY | (direction & (READ | WRITE));
	i->xarray = xarray;
	i->xarray_start = start;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_xarray);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	if (unlikely(iov_iter_is_xarray(i)))
		return (i->xarray_start + i->iov_offset) | i->count;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len,
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0))
	);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}),
	0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}
static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}), 0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (!next)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	const char *from = addr;
	__wsum sum, next;
	size_t off;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

	sum = csstate->csum;
	off = csstate->off;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else if (unlikely(iov_iter_is_xarray(i))) {
		unsigned offset;

		offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;

		npages = 1;
		if (size > PAGE_SIZE - offset) {
			size -= PAGE_SIZE - offset;
			npages += size >> PAGE_SHIFT;
			size &= ~PAGE_MASK;
			if (size)
				npages++;
		}
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	}),
	0
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}
static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}
*iovec_from_user(const struct iovec __user
*uvec
,
1970 unsigned long nr_segs
, unsigned long fast_segs
,
1971 struct iovec
*fast_iov
, bool compat
)
1973 struct iovec
*iov
= fast_iov
;
1977 * SuS says "The readv() function *may* fail if the iovcnt argument was
1978 * less than or equal to 0, or greater than {IOV_MAX}. Linux has
1979 * traditionally returned zero for zero segments, so...
1983 if (nr_segs
> UIO_MAXIOV
)
1984 return ERR_PTR(-EINVAL
);
1985 if (nr_segs
> fast_segs
) {
1986 iov
= kmalloc_array(nr_segs
, sizeof(struct iovec
), GFP_KERNEL
);
1988 return ERR_PTR(-ENOMEM
);
1992 ret
= copy_compat_iovec_from_user(iov
, uvec
, nr_segs
);
1994 ret
= copy_iovec_from_user(iov
, uvec
, nr_segs
);
1996 if (iov
!= fast_iov
)
1998 return ERR_PTR(ret
);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);}), ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);