/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
};

struct iov_iter {
	u8 iter_type;		/* one of enum iter_type */
	bool data_source;	/* true if the iter is a source of data (WRITE) */
	size_t iov_offset;	/* offset into the current segment */
	size_t count;		/* bytes remaining */
	/* backing store, selected by iter_type */
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
	size_t truncated;	/* bytes trimmed by iov_iter_truncate(), not yet reexpanded */
};
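
/*
 * Illustrative sketch: callers normally build an iov_iter through one of
 * the iov_iter_*() constructors declared below, which select the matching
 * union member.  For example, wrapping a kernel buffer that is about to be
 * filled (a READ); "buf" and "len" stand for a caller-provided buffer:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */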

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}
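
/*
 * Illustrative sketch: iov_iter_rw() reports the direction the iterator was
 * initialized with, so a single helper can serve both paths (do_dio_read()
 * and do_dio_write() are hypothetical):
 *
 *	if (iov_iter_rw(iter) == WRITE)
 *		return do_dio_write(iter);
 *	return do_dio_read(iter);
 */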

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
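
/*
 * Illustrative sketch: iov_length() is meant for arrays whose segments have
 * already been validated, e.g. ones produced by the import helpers declared
 * at the bottom of this file:
 *
 *	size_t total = iov_length(iov, nr_segs);
 *
 * On an unvalidated array the additions may wrap around size_t.
 */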

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
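
/*
 * Illustrative sketch: iov_iter_iovec() returns the current segment clamped
 * to the remaining count; paired with iov_iter_advance() (declared below) it
 * allows a segment-at-a-time walk of an ITER_IOVEC iterator (process() is a
 * hypothetical consumer returning bytes handled):
 *
 *	while (iov_iter_count(iter)) {
 *		struct iovec v = iov_iter_iovec(iter);
 *		ssize_t n = process(v.iov_base, v.iov_len);
 *
 *		if (n <= 0)
 *			break;
 *		iov_iter_advance(iter, n);
 *	}
 */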

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}
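
/*
 * Illustrative sketch: both wrappers return the number of bytes actually
 * copied, which can be short if a fault is hit.  A caller pushing a kernel
 * structure out to the iterator might do ("rec" stands for any caller-side
 * object):
 *
 *	size_t copied = copy_to_iter(&rec, sizeof(rec), iter);
 *
 *	if (copied != sizeof(rec))
 *		return -EFAULT;
 */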

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
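
/*
 * Illustrative sketch: the _full variant gives all-or-nothing semantics,
 * reverting the iterator on a short copy so callers need not track partial
 * progress (struct foo_hdr is a hypothetical on-wire header):
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */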

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() relative to copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_mc_to_iter(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
				 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
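
/*
 * Illustrative sketch: iov_iter_get_pages() pins the pages backing the next
 * chunk of the iterator, e.g. when building a request for direct I/O; the
 * caller owns a reference on each returned page and must drop it with
 * put_page() when done:
 *
 *	struct page *pages[16];
 *	size_t offset;
 *	ssize_t bytes = iov_iter_get_pages(iter, pages, SIZE_MAX, 16, &offset);
 *
 *	if (bytes < 0)
 *		return bytes;
 */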

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit on it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count) {
		i->truncated += i->count - count;
		i->count = count;
	}
}

/*
 * Reexpand a previously truncated iterator; count must not exceed what the
 * iterator held before it was shrunk.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->truncated -= count - i->count;
	i->count = count;
}
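
/*
 * Illustrative sketch: truncate and reexpand are typically paired around an
 * operation that must not run past some limit ("limit" comes from the
 * caller; do_op() is hypothetical and is assumed to consume all cap bytes):
 *
 *	size_t orig = iov_iter_count(iter);
 *	size_t cap = min_t(u64, orig, limit);
 *
 *	iov_iter_truncate(iter, cap);
 *	ret = do_op(iter);
 *	iov_iter_reexpand(iter, orig - cap);
 */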

struct csum_state {
	__wsum csum;	/* accumulated checksum */
	size_t off;	/* offset of this data within the checksummed stream */
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);
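
/*
 * Illustrative sketch: the checksumming copies fold the copied data into a
 * running checksum, as on the networking datagram paths; "csum" holds the
 * checksum accumulated so far and "data"/"len" stand for the caller's
 * buffer:
 *
 *	struct csum_state cs = { .csum = csum, .off = 0 };
 *	size_t copied = csum_and_copy_to_iter(data, len, &cs, iter);
 *
 *	csum = cs.csum;
 */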

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
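
/*
 * Illustrative sketch: the usual import_iovec() calling pattern keeps a
 * small on-stack array for the common case; on success *iovp either points
 * at a kmalloc'ed array or is set to NULL when the stack array was used, so
 * an unconditional kfree() is safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	kfree(iov);
 */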

#endif