// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t skip = i->iov_offset;				\
	len = min(n, __p->iov_len - skip);			\
	base = __p->iov_base + skip;				\
	if (skip < __p->iov_len)				\
	i->iov_offset = skip;					\

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	unsigned skip = i->iov_offset;				\
	unsigned offset = p->bv_offset + skip;			\
	void *kaddr = kmap_local_page(p->bv_page +		\
				      offset / PAGE_SIZE);	\
	base = kaddr + offset % PAGE_SIZE;			\
	len = min(min(n, (size_t)(p->bv_len - skip)),		\
		  (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
	kunmap_local(kaddr);					\
	if (skip == p->bv_len) {				\
	i->iov_offset = skip;					\

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	struct page *head = NULL;				\
	loff_t start = i->xarray_start + i->iov_offset;		\
	unsigned offset = start % PAGE_SIZE;			\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
	xas_for_each(&xas, head, ULONG_MAX) {			\
		if (xas_retry(&xas, head))			\
		if (WARN_ON(xa_is_value(head)))			\
		if (WARN_ON(PageHuge(head)))			\
		for (j = (head->index < index) ? index - head->index : 0; \
		     j < thp_nr_pages(head); j++) {		\
			void *kaddr = kmap_local_page(head + j); \
			base = kaddr + offset;			\
			len = PAGE_SIZE - offset;		\
			kunmap_local(kaddr);			\
	if (left || n == 0)					\
	i->iov_offset += __off;					\

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
	if (likely(iter_is_iovec(i))) {				\
		const struct iovec *iov = i->iov;		\
		iterate_iovec(i, n, base, len, off,		\
		i->nr_segs -= iov - i->iov;			\
	} else if (iov_iter_is_bvec(i)) {			\
		const struct bio_vec *bvec = i->bvec;		\
		iterate_bvec(i, n, base, len, off,		\
		i->nr_segs -= bvec - i->bvec;			\
	} else if (iov_iter_is_kvec(i)) {			\
		const struct kvec *kvec = i->kvec;		\
		iterate_iovec(i, n, base, len, off,		\
		i->nr_segs -= kvec - i->kvec;			\
	} else if (iov_iter_is_xarray(i)) {			\
		iterate_xarray(i, n, base, len, off,		\

#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		offset = from - kaddr;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
		kaddr = kmap_atomic(page);

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	left = copyin(to, buf, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;

		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...
		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	return false;
}
#else
#define sanity(i) true
#endif /* PIPE_PARANOIA */
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	buf = &pipe->bufs[i_head & p_mask];
	if (offset == off && buf->page == page) {
		/* merge with the last one */
		i->iov_offset += bytes;
	buf = &pipe->bufs[i_head & p_mask];
	if (pipe_full(i_head, p_tail, pipe->max_usage))
	buf->ops = &page_cache_pipe_buf_ops;
	buf->offset = offset;
	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
/**
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size. For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;

		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);

			ret = fault_in_readable(p->iov_base + skip, len);
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
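/*
 * Illustrative usage sketch, not part of the original file: write paths that
 * copy from userspace while holding locks that forbid page faults typically
 * alternate an atomic copy with fault_in_iov_iter_readable().  The helper
 * name below is made up for illustration only.
 */
static ssize_t __maybe_unused example_fill_page(struct page *page,
						struct iov_iter *i)
{
	size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(i));
	size_t copied;

	do {
		/* cannot take page faults; returns 0 if the source is absent */
		copied = copy_page_from_iter_atomic(page, 0, bytes, i);
		if (copied)
			return copied;
		/* fault the user pages in, then retry the atomic copy */
	} while (fault_in_iov_iter_readable(i, bytes) != bytes);

	return -EFAULT;
}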
/**
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;

		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);

			ret = fault_in_safe_writeable(p->iov_base + skip, len);
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
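/*
 * Illustrative usage sketch, not part of the original file: a read path that
 * must not take page faults while holding a filesystem lock can pre-fault
 * the destination with fault_in_iov_iter_writeable() and then copy.  The
 * helper name and retry policy are made up for illustration.
 */
static ssize_t __maybe_unused example_prefaulted_read(void *src, size_t len,
						      struct iov_iter *i)
{
	size_t copied;

	/* best effort: pull the destination pages in via get_user_pages() */
	if (fault_in_iov_iter_writeable(i, len) == len)
		return -EFAULT;	/* nothing could be faulted in */

	copied = copy_to_iter(src, len, i);
	return copied ? copied : -EFAULT;
}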
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
	*iter_headp = iter_head;

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;

	if (unlikely(size > i->count))
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	left -= PAGE_SIZE - off;
	pipe->bufs[iter_head & p_mask].len += size;
	pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);

		buf->ops = &default_pipe_buf_ops;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
	pipe->head = iter_head;
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
	memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
	i->iov_offset = off + chunk;

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = push_pipe(i, bytes, &i_head, &r);
	size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
	char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
	sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
	i->iov_offset = r + chunk;

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	size_t n, off, xfer = 0;

	n = push_pipe(i, bytes, &i_head, &off);
	size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
	char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
	rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
	i->iov_offset = off + chunk;

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
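/*
 * Illustrative usage sketch, not part of the original file: a pmem-style
 * read path treats a short return from _copy_mc_to_iter() as poisoned source
 * memory and fails the remainder with -EIO, as the comment above describes.
 * The helper name is made up for illustration.
 */
static ssize_t __maybe_unused example_dax_read(void *kaddr, size_t len,
					       struct iov_iter *i)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, i);

	if (copied < len)
		return copied ? copied : -EIO;	/* poison hit before the end */
	return copied;
}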
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
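/*
 * Illustrative usage sketch, not part of the original file: a persistent
 * memory write path uses the flushcache variant so that the data is durable
 * once the copy returns (given CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE; otherwise
 * the name falls back to the nocache copy).  The helper name is made up.
 */
static size_t __maybe_unused example_pmem_write(void *pmem_addr, size_t len,
						struct iov_iter *i)
{
	return _copy_from_iter_flushcache(pmem_addr, len, i);
}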
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	size_t n = __copy_page_to_iter(page, offset,
			min(bytes, (size_t)PAGE_SIZE - offset), i);
	if (offset == PAGE_SIZE) {
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
	char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
	memset(p + off, 0, chunk);
	i->iov_offset = off + chunk;

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		buf = &pipe->bufs[i_head & p_mask];
		buf->len = off - buf->offset;
		while (p_head != i_head) {
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		pipe->head = p_head;

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off = i->iov_offset, left = size;

	if (off) /* make it relative to the beginning of buffer */
		left += off - pipe->bufs[i_head & p_mask].offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (left <= buf->len)
	i->iov_offset = buf->offset + left;
	/* ... and discard everything past that point */

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
		size_t n = off - b->offset;

		if (!unroll && i_head == i->start_head) {
		b = &pipe->bufs[i_head & p_mask];
		off = b->offset + b->len;
		i->iov_offset = off;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* don't belong to us.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		size_t n = (--bvec)->bv_len;
		i->iov_offset = n - unroll;
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		size_t n = (--iov)->iov_len;
		i->iov_offset = n - unroll;
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages. The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
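/*
 * Illustrative usage sketch, not part of the original file: a netfs-style
 * caller can point an iterator at an inode's pagecache before handing it to
 * the copy helpers.  READ here means the pages are the destination of the
 * transfer; keeping the pages referenced or locked is the caller's job, as
 * the comment above notes.  The helper name is made up.
 */
static void __maybe_unused example_iter_over_pagecache(struct iov_iter *iter,
							struct address_space *mapping,
							loff_t pos, size_t len)
{
	iov_iter_xarray(iter, READ, &mapping->i_pages, pos, len);
}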
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
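/*
 * Illustrative usage sketch, not part of the original file: to consume and
 * throw away 'count' bytes from a producer that fills an iov_iter, hand it a
 * discard iterator; everything "copied" into it is dropped while the
 * iterator still advances.  The helper name is made up.
 */
static void __maybe_unused example_init_sink(struct iov_iter *iter, size_t count)
{
	iov_iter_discard(iter, READ, count);
}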
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		res |= (unsigned long)i->iov[k].iov_base + skip;

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);

	get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;

	if (count > maxpages)
	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);

	actual = PAGE_SIZE * nr;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
/* must be done on non-empty ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i,
					 size_t *size, size_t *start,
					 size_t maxsize, unsigned maxpages)
{
	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
		size_t len = i->iov[k].iov_len - skip;

		len += (*start = addr % PAGE_SIZE);
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		return addr & PAGE_MASK;
	BUG(); // if it had been empty, we wouldn't get called

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize, unsigned maxpages)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	len += (*start = skip % PAGE_SIZE);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		page = first_bvec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		while (n--)
			get_page(*pages++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	unsigned int iter_head, npages;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;

	npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;

	p = get_pages_array(count);
	nr = iter_xarray_populate_pages(p, i->xarray, index, count);

	actual = PAGE_SIZE * nr;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {

		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		page = first_bvec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		while (n--)
			get_page(*p++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {

	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	/* iovec and kvec have identical layout */
	return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}. Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;

	iov_iter_init(i, type, iov, nr_segs, total_len);
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
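/*
 * Illustrative usage sketch, not part of the original file: the usual
 * syscall pattern uses a small on-stack array and kfree()s whatever
 * import_iovec() left in 'iov', which is safe because *iovp is set to NULL
 * when the on-stack array was used.  The helper name is made up.
 */
static ssize_t __maybe_unused example_import(const struct iovec __user *uvec,
					     unsigned int nr_segs,
					     struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, iter);
	if (ret < 0)
		return ret;
	/* ... perform the I/O against 'iter' here ... */
	kfree(iov);
	return ret;
}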
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduce
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
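/*
 * Illustrative usage sketch, not part of the original file: callers that may
 * need to retry an operation snapshot the iterator first and roll it back on
 * failure.  The helper name is made up for illustration.
 */
static ssize_t __maybe_unused example_retryable_copy(void *buf, size_t len,
						     struct iov_iter *i)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(i, &state);
	copied = copy_from_iter(buf, len, i);
	if (copied != len) {
		/* undo the partial advance as if nothing had been consumed */
		iov_iter_restore(i, &state);
		return -EFAULT;
	}
	return copied;
}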