/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/thread_info.h>
14 #include <uapi/linux/uio.h>
/* Opaque here; this header only ever uses it through a pointer. */
struct pipe_inode_info;
20 void *iov_base
; /* and that should *never* hold a userland pointer */
37 const struct iovec
*iov
;
38 const struct kvec
*kvec
;
39 const struct bio_vec
*bvec
;
40 struct pipe_inode_info
*pipe
;
43 unsigned long nr_segs
;
51 static inline enum iter_type
iov_iter_type(const struct iov_iter
*i
)
53 return i
->type
& ~(READ
| WRITE
);
56 static inline bool iter_is_iovec(const struct iov_iter
*i
)
58 return iov_iter_type(i
) == ITER_IOVEC
;
61 static inline bool iov_iter_is_kvec(const struct iov_iter
*i
)
63 return iov_iter_type(i
) == ITER_KVEC
;
66 static inline bool iov_iter_is_bvec(const struct iov_iter
*i
)
68 return iov_iter_type(i
) == ITER_BVEC
;
71 static inline bool iov_iter_is_pipe(const struct iov_iter
*i
)
73 return iov_iter_type(i
) == ITER_PIPE
;
76 static inline bool iov_iter_is_discard(const struct iov_iter
*i
)
78 return iov_iter_type(i
) == ITER_DISCARD
;
81 static inline unsigned char iov_iter_rw(const struct iov_iter
*i
)
83 return i
->type
& (READ
| WRITE
);
87 * Total number of bytes covered by an iovec.
89 * NOTE that it is not safe to use this function until all the iovec's
90 * segment lengths have been validated. Because the individual lengths can
91 * overflow a size_t when added together.
93 static inline size_t iov_length(const struct iovec
*iov
, unsigned long nr_segs
)
98 for (seg
= 0; seg
< nr_segs
; seg
++)
99 ret
+= iov
[seg
].iov_len
;
/*
 * Materialize the current segment of @iter as a struct iovec:
 * iov_base skips the part of the first iovec already consumed
 * (iov_offset), and iov_len is additionally capped by the total
 * bytes remaining in the iterator (iter->count).
 * Only meaningful for iovec-backed iterators (it reads iter->iov).
 */
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
/*
 * iov_for_each - iterate @iov over every remaining segment of @start.
 * @iter is a working copy of @start, advanced segment by segment.
 *
 * Fix: as transcribed, the for-loop condition was the comma expression
 * ((iov = iov_iter_iovec(...)), 1), which is always true, so the loop
 * could never terminate; restore the (iter).count test so iteration
 * stops when the iterator is exhausted.
 *
 * NOTE: arguments are expanded multiple times - pass only simple lvalues,
 * never expressions with side effects.  Only valid for ITER_IOVEC and
 * ITER_KVEC iterators (other flavours skip the loop entirely).
 */
#define iov_for_each(iov, iter, start)				\
	if (iov_iter_type(start) == ITER_IOVEC ||		\
	    iov_iter_type(start) == ITER_KVEC)			\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))
/* Core iov_iter primitives; all offsets and sizes are in bytes. */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
/* NOTE(review): trailing "struct iov_iter *i);" parameter of the two
 * copy_page_*() declarations was lost in transcription and is
 * reconstructed here - confirm against the original header. */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

/*
 * Unchecked copy backends: the copy_{to,from}_iter*() wrappers below pass
 * every request through check_copy_size() before calling these.
 */
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
137 static __always_inline __must_check
138 size_t copy_to_iter(const void *addr
, size_t bytes
, struct iov_iter
*i
)
140 if (unlikely(!check_copy_size(addr
, bytes
, true)))
143 return _copy_to_iter(addr
, bytes
, i
);
146 static __always_inline __must_check
147 size_t copy_from_iter(void *addr
, size_t bytes
, struct iov_iter
*i
)
149 if (unlikely(!check_copy_size(addr
, bytes
, false)))
152 return _copy_from_iter(addr
, bytes
, i
);
155 static __always_inline __must_check
156 bool copy_from_iter_full(void *addr
, size_t bytes
, struct iov_iter
*i
)
158 if (unlikely(!check_copy_size(addr
, bytes
, false)))
161 return _copy_from_iter_full(addr
, bytes
, i
);
164 static __always_inline __must_check
165 size_t copy_from_iter_nocache(void *addr
, size_t bytes
, struct iov_iter
*i
)
167 if (unlikely(!check_copy_size(addr
, bytes
, false)))
170 return _copy_from_iter_nocache(addr
, bytes
, i
);
173 static __always_inline __must_check
174 bool copy_from_iter_full_nocache(void *addr
, size_t bytes
, struct iov_iter
*i
)
176 if (unlikely(!check_copy_size(addr
, bytes
, false)))
179 return _copy_from_iter_full_nocache(addr
, bytes
, i
);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
/* NOTE(review): #else/#endif reconstructed from transcription - confirm.
 * Without the arch capability, fall back to the plain nocache copy. */
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
/* NOTE(review): #else/#endif reconstructed from transcription - confirm.
 * Without machine-check-safe uaccess, the plain copy is used. */
#define _copy_to_iter_mcsafe _copy_to_iter
#endif
200 static __always_inline __must_check
201 size_t copy_from_iter_flushcache(void *addr
, size_t bytes
, struct iov_iter
*i
)
203 if (unlikely(!check_copy_size(addr
, bytes
, false)))
206 return _copy_from_iter_flushcache(addr
, bytes
, i
);
209 static __always_inline __must_check
210 size_t copy_to_iter_mcsafe(void *addr
, size_t bytes
, struct iov_iter
*i
)
212 if (unlikely(!check_copy_size(addr
, bytes
, true)))
215 return _copy_to_iter_mcsafe(addr
, bytes
, i
);
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);

/*
 * Constructors: bind an iterator to its backing representation
 * (iovec / kvec / bio_vec / pipe / discard).  @direction carries the
 * READ/WRITE bit later extracted by iov_iter_rw().
 */
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
/* NOTE(review): trailing "size_t count);" of iov_iter_pipe() was lost in
 * transcription and is reconstructed here - confirm against the original. */
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);

ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
/* Bytes remaining in @i.
 * NOTE(review): the body was lost in transcription; reconstructed as
 * returning i->count (the field iov_iter_truncate() below caps) - confirm. */
static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 * NOTE(review): the body was lost in transcription; reconstructed as the
 * plain restore of i->count - confirm against the original header.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
/*
 * Checksumming copies - presumably accumulate a checksum of the copied
 * bytes into *@csum while copying (NOTE(review): verify against the
 * implementation in lib/iov_iter.c).
 */
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

/* Build a kernel iovec array + iterator from a userspace vector. */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

/* NOTE(review): normally guarded by CONFIG_COMPAT; the surrounding #ifdef
 * was lost in transcription - confirm it is still present in the file. */
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
/* Invoke @f on successive ranges of @i.
 * NOTE(review): the trailing "void *context);" parameter was lost in
 * transcription and is reconstructed here - confirm against the original. */
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);