// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
	while (unlikely(!left && n)) {			\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
		__v.iov_base = __p->iov_base;		\
		__v.iov_len -= left;			\

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		skip += __v.iov_len;			\
	while (unlikely(n)) {				\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
		__v.iov_base = __p->iov_base;		\

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else if (unlikely(i->type & ITER_DISCARD)) {		\
		const struct iovec *iov;			\
		iterate_iovec(i, n, v, iov, skip, (I))		\

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		const struct bio_vec *bvec = i->bvec;		\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
		i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
		i->nr_segs -= i->bvec - bvec;			\
		skip = __bi.bi_bvec_done;			\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
		if (skip == kvec->iov_len) {			\
		i->nr_segs -= kvec - i->kvec;			\
	} else if (unlikely(i->type & ITER_DISCARD)) {		\
		const struct iovec *iov;			\
		iterate_iovec(i, n, v, iov, skip, (I))		\
		if (skip == iov->iov_len) {			\
		i->nr_segs -= iov - i->iov;			\
	i->iov_offset = skip;					\
static int copyout(void __user *to, const void *from, size_t n)
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);

static int copyin(void *to, const void __user *from, size_t n)
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
	size_t skip, copy, left, wanted;
	const struct iovec *iov;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);

		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);

		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		offset = from - kaddr;
		kunmap_atomic(kaddr);
	copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	from = kaddr + offset;
	left = copyout(buf, from, copy);

	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);

	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
	size_t skip, copy, left, wanted;
	const struct iovec *iov;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);

		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);

		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		kunmap_atomic(kaddr);
	copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	left = copyin(to, buf, copy);

	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);

	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
static bool sanity(const struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;

		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer

	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
#define sanity(i) true
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;

	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))

	buf = &pipe->bufs[i_head & p_mask];
	if (offset == off && buf->page == page) {
		/* merge with the last one */
		i->iov_offset += bytes;
	buf = &pipe->bufs[i_head & p_mask];
	if (pipe_full(i_head, p_tail, pipe->max_usage))

	buf->ops = &page_cache_pipe_buf_ops;
	buf->offset = offset;
	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
	size_t skip = i->iov_offset;
	const struct iovec *iov;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
EXPORT_SYMBOL(iov_iter_fault_in_readable);
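
/*
 * Example (illustrative sketch, names are hypothetical): the usual
 * pattern in a buffered-write path is to fault the user pages in
 * before taking locks that must not be held across a page fault,
 * then retry the copy under the lock.
 */
static int example_write_prefault(struct iov_iter *i, size_t bytes)
{
	/* non-zero means the user memory could not be touched */
	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return -EFAULT;
	return 0;
}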
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
		i->type = ITER_IOVEC | direction;
	i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_init);
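
/*
 * Example (illustrative sketch, function and buffer names are
 * hypothetical): wire a single user iovec into an iov_iter for a
 * read-style path and fill it from a kernel buffer with copy_to_iter().
 */
static ssize_t example_read_to_user(void __user *ubuf, size_t len,
				    const void *kbuf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	/* READ: data flows from the kernel into the user iovec */
	iov_iter_init(&iter, READ, &iov, 1, len);
	return copy_to_iter(kbuf, len, &iter);
}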
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);

static void memzero_page(struct page *page, size_t offset, size_t len)
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);

static inline bool allocated(struct pipe_buffer *buf)
	return buf->ops == &default_pipe_buf_ops;

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
	*iter_headp = iter_head;
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;

	if (unlikely(size > i->count))

	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
		left -= PAGE_SIZE - off;
			pipe->bufs[iter_head & p_mask].len += size;
			pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);

		buf->ops = &default_pipe_buf_ops;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
	pipe->head = iter_head;
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->iov_offset = off + chunk;
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
	return csum_block_add(sum, next, off);

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 __wsum *csum, struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		i->iov_offset = r + chunk;
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	size_t n, off, xfer = 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);

		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
		i->iov_offset = off + chunk - rem;
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *
 * See MCSAFE_TEST for self-test.
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (iter_is_iovec(i))
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (unlikely(i->count < bytes))
	if (iter_is_iovec(i))
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
			   v.iov_base, v.iov_len))
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (unlikely(i->count < bytes))
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
	size_t v = n + offset;

	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	if (n <= v && v <= PAGE_SIZE)

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
	if (unlikely(!page_copy_sane(page, offset, bytes)))
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
	} else if (unlikely(iov_iter_is_discard(i)))
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
		return copy_page_to_iter_pipe(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
	if (unlikely(!page_copy_sane(page, offset, bytes)))
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
	return copy_page_from_iter_iovec(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->iov_offset = off + chunk;

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
EXPORT_SYMBOL(iov_iter_zero);
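
/*
 * Example (illustrative sketch, hole_len is hypothetical): a read path
 * that hits a hole can hand the remaining destination to iov_iter_zero()
 * instead of copying, zeroing at most what the iterator still wants.
 */
static size_t example_zero_hole(struct iov_iter *iter, size_t hole_len)
{
	return iov_iter_zero(min(hole_len, iov_iter_count(iter)), iter);
}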
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	kunmap_atomic(kaddr);
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
		while (p_head != i_head) {
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		pipe->head = p_head;

static void pipe_advance(struct iov_iter *i, size_t size)
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
		i->iov_offset = buf->offset + left;
	/* ... and discard everything past that point */
void iov_iter_advance(struct iov_iter *i, size_t size)
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
	if (unlikely(iov_iter_is_discard(i))) {
	iterate_and_advance(i, size, v, 0, 0, 0)
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
	if (WARN_ON(unroll > MAX_RW_COUNT))
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (!unroll && i_head == i->start_head) {
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		i->iov_offset = off;
	if (unlikely(iov_iter_is_discard(i)))
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
			size_t n = (--bvec)->bv_len;
				i->iov_offset = n - unroll;
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
			size_t n = (--iov)->iov_len;
				i->iov_offset = n - unroll;
EXPORT_SYMBOL(iov_iter_revert);
 * Return the count of just the current iov_iter segment.
size_t iov_iter_single_seg_count(const struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
	if (unlikely(iov_iter_is_discard(i)))
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
		return min(i->count, i->iov->iov_len - i->iov_offset);
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_kvec);
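
/*
 * Example (illustrative sketch, names are hypothetical): describe a
 * kernel buffer with a kvec-backed iterator so that a producer can
 * fill it with copy_to_iter().
 */
static size_t example_fill_kernel_buf(void *kbuf, size_t len, const void *src)
{
	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
	struct iov_iter iter;

	/* READ: the kvec is the destination of the copy */
	iov_iter_kvec(&iter, READ, &kv, 1, len);
	return copy_to_iter(src, len, &iter);
}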
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->head = pipe->head;
	i->start_head = i->head;
EXPORT_SYMBOL(iov_iter_pipe);
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
EXPORT_SYMBOL(iov_iter_discard);
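
/*
 * Example (illustrative sketch, names are hypothetical): drain data
 * that nobody wants to keep by pointing a discard iterator at the byte
 * count and letting copy_to_iter() consume it.
 */
static void example_drain(const void *buf, size_t len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, READ, len);
	copy_to_iter(buf, len, &iter);	/* data is dropped, count advances */
}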
unsigned long iov_iter_alignment(const struct iov_iter *i)
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				struct page **pages,
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);

		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
	unsigned int iter_head, npages;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
	if (maxsize > i->count)

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
		if (unlikely(res < 0))
		return (res == n ? len : res * PAGE_SIZE) - *start;
	/* can't be more than PAGE_SIZE */
	*start = v.bv_offset;
	get_page(*pages = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
	unsigned int iter_head, npages;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
	if (maxsize > i->count)

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
		return (res == n ? len : res * PAGE_SIZE) - *start;
	/* can't be more than PAGE_SIZE */
	*start = v.bv_offset;
	*pages = p = get_pages_array(1);
	get_page(*p = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
			sum = csum_block_add(sum, next, off);
		err ? v.iov_len : 0;
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	if (unlikely(i->count < bytes))
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
			sum = csum_block_add(sum, next, off);
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
	const char *from = addr;
	__wsum *csum = csump;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_len, 0, &err);
			sum = csum_block_add(sum, next, off);
		err ? v.iov_len : 0;
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		sum = csum_and_memcpy(v.iov_base,
				      (from += v.iov_len) - v.iov_len,
				      v.iov_len, sum, off);
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
#ifdef CONFIG_CRYPTO
	struct ahash_request *hash = hashp;
	struct scatterlist sg;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
EXPORT_SYMBOL(hash_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
	size_t size = i->count;

	if (unlikely(iov_iter_is_discard(i)))

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
		if (npages >= maxpages)
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
	if (unlikely(iov_iter_is_pipe(new))) {
	if (unlikely(iov_iter_is_discard(new)))
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
	/* iovec and kvec have identical layout */
	return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
EXPORT_SYMBOL(dup_iter);
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
ssize_t import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
EXPORT_SYMBOL(import_iovec);
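
/*
 * Example (illustrative sketch, the surrounding function name is
 * hypothetical): the usual import_iovec() calling convention in a
 * writev-style handler.  Per the comment above, *iov may be replaced
 * by a heap allocation, so kfree() is called unconditionally.
 */
static ssize_t example_writev(const struct iovec __user *uvec,
			      unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... consume the data with copy_from_iter() and friends ... */

	kfree(iov);
	return ret;
}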
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

ssize_t compat_import_iovec(int type,
		const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i)
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
EXPORT_SYMBOL(compat_import_iovec);
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
	if (len > MAX_RW_COUNT)
	if (unlikely(!access_ok(buf, len)))

	iov->iov_base = buf;
	iov_iter_init(i, rw, iov, 1, len);
EXPORT_SYMBOL(import_single_range);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		err = f(&w, context);})
EXPORT_SYMBOL(iov_iter_for_each_range);