/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
8 #include <linux/kernel.h>
9 #include <linux/thread_info.h>
10 #include <uapi/linux/uio.h>
/* Forward declaration — only pointers to it are used in this header. */
struct pipe_inode_info;
/*
 * Kernel-space counterpart of struct iovec: same shape, but iov_base is
 * a kernel pointer.
 */
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
36 const struct iovec
*iov
;
37 const struct kvec
*kvec
;
38 const struct bio_vec
*bvec
;
39 struct xarray
*xarray
;
40 struct pipe_inode_info
*pipe
;
43 unsigned long nr_segs
;
46 unsigned int start_head
;
52 static inline enum iter_type
iov_iter_type(const struct iov_iter
*i
)
57 static inline bool iter_is_iovec(const struct iov_iter
*i
)
59 return iov_iter_type(i
) == ITER_IOVEC
;
62 static inline bool iov_iter_is_kvec(const struct iov_iter
*i
)
64 return iov_iter_type(i
) == ITER_KVEC
;
67 static inline bool iov_iter_is_bvec(const struct iov_iter
*i
)
69 return iov_iter_type(i
) == ITER_BVEC
;
72 static inline bool iov_iter_is_pipe(const struct iov_iter
*i
)
74 return iov_iter_type(i
) == ITER_PIPE
;
77 static inline bool iov_iter_is_discard(const struct iov_iter
*i
)
79 return iov_iter_type(i
) == ITER_DISCARD
;
82 static inline bool iov_iter_is_xarray(const struct iov_iter
*i
)
84 return iov_iter_type(i
) == ITER_XARRAY
;
87 static inline unsigned char iov_iter_rw(const struct iov_iter
*i
)
89 return i
->data_source
? WRITE
: READ
;
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated.  Because the individual lengths can
 * overflow a size_t when added together.
 */
/*
 * Sum of iov_len over the first @nr_segs entries of @iov.  Caller must
 * have validated the lengths first: the sum can overflow size_t.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
109 static inline struct iovec
iov_iter_iovec(const struct iov_iter
*iter
)
111 return (struct iovec
) {
112 .iov_base
= iter
->iov
->iov_base
+ iter
->iov_offset
,
113 .iov_len
= min(iter
->count
,
114 iter
->iov
->iov_len
- iter
->iov_offset
),
118 size_t copy_page_from_iter_atomic(struct page
*page
, unsigned offset
,
119 size_t bytes
, struct iov_iter
*i
);
120 void iov_iter_advance(struct iov_iter
*i
, size_t bytes
);
121 void iov_iter_revert(struct iov_iter
*i
, size_t bytes
);
122 int iov_iter_fault_in_readable(const struct iov_iter
*i
, size_t bytes
);
123 size_t iov_iter_single_seg_count(const struct iov_iter
*i
);
124 size_t copy_page_to_iter(struct page
*page
, size_t offset
, size_t bytes
,
126 size_t copy_page_from_iter(struct page
*page
, size_t offset
, size_t bytes
,
129 size_t _copy_to_iter(const void *addr
, size_t bytes
, struct iov_iter
*i
);
130 size_t _copy_from_iter(void *addr
, size_t bytes
, struct iov_iter
*i
);
131 size_t _copy_from_iter_nocache(void *addr
, size_t bytes
, struct iov_iter
*i
);
133 static __always_inline __must_check
134 size_t copy_to_iter(const void *addr
, size_t bytes
, struct iov_iter
*i
)
136 if (unlikely(!check_copy_size(addr
, bytes
, true)))
139 return _copy_to_iter(addr
, bytes
, i
);
142 static __always_inline __must_check
143 size_t copy_from_iter(void *addr
, size_t bytes
, struct iov_iter
*i
)
145 if (unlikely(!check_copy_size(addr
, bytes
, false)))
148 return _copy_from_iter(addr
, bytes
, i
);
151 static __always_inline __must_check
152 bool copy_from_iter_full(void *addr
, size_t bytes
, struct iov_iter
*i
)
154 size_t copied
= copy_from_iter(addr
, bytes
, i
);
155 if (likely(copied
== bytes
))
157 iov_iter_revert(i
, copied
);
161 static __always_inline __must_check
162 size_t copy_from_iter_nocache(void *addr
, size_t bytes
, struct iov_iter
*i
)
164 if (unlikely(!check_copy_size(addr
, bytes
, false)))
167 return _copy_from_iter_nocache(addr
, bytes
, i
);
170 static __always_inline __must_check
171 bool copy_from_iter_full_nocache(void *addr
, size_t bytes
, struct iov_iter
*i
)
173 size_t copied
= copy_from_iter_nocache(addr
, bytes
, i
);
174 if (likely(copied
== bytes
))
176 iov_iter_revert(i
, copied
);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
#ifdef CONFIG_ARCH_HAS_COPY_MC
/* Machine-check-aware copy; falls back to plain _copy_to_iter otherwise. */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif
198 static __always_inline __must_check
199 size_t copy_from_iter_flushcache(void *addr
, size_t bytes
, struct iov_iter
*i
)
201 if (unlikely(!check_copy_size(addr
, bytes
, false)))
204 return _copy_from_iter_flushcache(addr
, bytes
, i
);
207 static __always_inline __must_check
208 size_t copy_mc_to_iter(void *addr
, size_t bytes
, struct iov_iter
*i
)
210 if (unlikely(!check_copy_size(addr
, bytes
, true)))
213 return _copy_mc_to_iter(addr
, bytes
, i
);
216 size_t iov_iter_zero(size_t bytes
, struct iov_iter
*);
217 unsigned long iov_iter_alignment(const struct iov_iter
*i
);
218 unsigned long iov_iter_gap_alignment(const struct iov_iter
*i
);
219 void iov_iter_init(struct iov_iter
*i
, unsigned int direction
, const struct iovec
*iov
,
220 unsigned long nr_segs
, size_t count
);
221 void iov_iter_kvec(struct iov_iter
*i
, unsigned int direction
, const struct kvec
*kvec
,
222 unsigned long nr_segs
, size_t count
);
223 void iov_iter_bvec(struct iov_iter
*i
, unsigned int direction
, const struct bio_vec
*bvec
,
224 unsigned long nr_segs
, size_t count
);
225 void iov_iter_pipe(struct iov_iter
*i
, unsigned int direction
, struct pipe_inode_info
*pipe
,
227 void iov_iter_discard(struct iov_iter
*i
, unsigned int direction
, size_t count
);
228 void iov_iter_xarray(struct iov_iter
*i
, unsigned int direction
, struct xarray
*xarray
,
229 loff_t start
, size_t count
);
230 ssize_t
iov_iter_get_pages(struct iov_iter
*i
, struct page
**pages
,
231 size_t maxsize
, unsigned maxpages
, size_t *start
);
232 ssize_t
iov_iter_get_pages_alloc(struct iov_iter
*i
, struct page
***pages
,
233 size_t maxsize
, size_t *start
);
234 int iov_iter_npages(const struct iov_iter
*i
, int maxpages
);
236 const void *dup_iter(struct iov_iter
*new, struct iov_iter
*old
, gfp_t flags
);
238 static inline size_t iov_iter_count(const struct iov_iter
*i
)
244 * Cap the iov_iter by given limit; note that the second argument is
245 * *not* the new size - it's upper limit for such. Passing it a value
246 * greater than the amount of data in iov_iter is fine - it'll just do
247 * nothing in that case.
249 static inline void iov_iter_truncate(struct iov_iter
*i
, u64 count
)
252 * count doesn't have to fit in size_t - comparison extends both
253 * operands to u64 here and any value that would be truncated by
254 * conversion in assignement is by definition greater than all
255 * values of size_t, including old i->count.
257 if (i
->count
> count
)
262 * reexpand a previously truncated iterator; count must be no more than how much
265 static inline void iov_iter_reexpand(struct iov_iter
*i
, size_t count
)
275 size_t csum_and_copy_to_iter(const void *addr
, size_t bytes
, void *csstate
, struct iov_iter
*i
);
276 size_t csum_and_copy_from_iter(void *addr
, size_t bytes
, __wsum
*csum
, struct iov_iter
*i
);
278 static __always_inline __must_check
279 bool csum_and_copy_from_iter_full(void *addr
, size_t bytes
,
280 __wsum
*csum
, struct iov_iter
*i
)
282 size_t copied
= csum_and_copy_from_iter(addr
, bytes
, csum
, i
);
283 if (likely(copied
== bytes
))
285 iov_iter_revert(i
, copied
);
288 size_t hash_and_copy_to_iter(const void *addr
, size_t bytes
, void *hashp
,
291 struct iovec
*iovec_from_user(const struct iovec __user
*uvector
,
292 unsigned long nr_segs
, unsigned long fast_segs
,
293 struct iovec
*fast_iov
, bool compat
);
294 ssize_t
import_iovec(int type
, const struct iovec __user
*uvec
,
295 unsigned nr_segs
, unsigned fast_segs
, struct iovec
**iovp
,
297 ssize_t
__import_iovec(int type
, const struct iovec __user
*uvec
,
298 unsigned nr_segs
, unsigned fast_segs
, struct iovec
**iovp
,
299 struct iov_iter
*i
, bool compat
);
300 int import_single_range(int type
, void __user
*buf
, size_t len
,
301 struct iovec
*iov
, struct iov_iter
*i
);