/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
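
/*
 * Illustrative sketch, not part of the original header: a kvec describes
 * kernel memory only, e.g. an on-stack or kmalloc'd buffer.
 *
 *	char buf[64];                           // kernel-space buffer
 *	struct kvec kv = {
 *		.iov_base = buf,                // never a __user pointer
 *		.iov_len  = sizeof(buf),
 *	};
 */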

enum iter_type {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
	ITER_DISCARD = 16,
};

struct iov_iter {
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};
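
/*
 * Illustrative sketch, not part of the original header: for a kvec-backed
 * iterator the fields pair up roughly as below; iov_iter_kvec(), declared
 * further down, is the supported way to set one up.
 *
 *	i->type       = ITER_KVEC | READ;       // flavour plus data direction
 *	i->kvec       = kv_array;               // array of segments
 *	i->nr_segs    = n;                      // segments remaining
 *	i->iov_offset = 0;                      // offset into first segment
 *	i->count      = total_bytes;            // bytes left to process
 */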

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}
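
/*
 * A minimal sketch of what the accessors above decode (local names are
 * illustrative): the iter_type flavour and the READ/WRITE direction share
 * the same type word, so each helper masks out the bits it does not care
 * about.
 *
 *	struct iov_iter i;
 *	iov_iter_kvec(&i, WRITE, kv, 1, len);   // type == ITER_KVEC | WRITE
 *	if (iov_iter_is_kvec(&i))               // flavour test
 *		pr_debug("rw=%u\n", iov_iter_rw(&i)); // direction test
 */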

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
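
/*
 * Worked example of the overflow warned about above (added illustration):
 * on a 32-bit kernel, two segments of 0x80000000 bytes each sum to
 * 0x100000000, which wraps to 0 in a 32-bit size_t, so iov_length() would
 * report zero bytes for a 4GiB request. Validate the segment lengths
 * first, as import_iovec() below does, before trusting the sum.
 */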

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

#define iov_for_each(iov, iter, start)				\
	if (iov_iter_type(&(start)) == ITER_IOVEC ||		\
	    iov_iter_type(&(start)) == ITER_KVEC)		\
		for (iter = (start);				\
		     (iter).count &&				\
		     ((iov = iov_iter_iovec(&(iter))), 1);	\
		     iov_iter_advance(&(iter), (iov).iov_len))
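
/*
 * Illustrative sketch, not part of the original header: walking an iovec-
 * or kvec-backed iterator one segment at a time. 'src' is assumed to be a
 * caller-owned iterator; the macro copies it, so 'src' is left untouched.
 *
 *	struct iovec v;
 *	struct iov_iter walk;
 *
 *	iov_for_each(v, walk, *src)
 *		pr_debug("segment %p, %zu bytes\n", v.iov_base, v.iov_len);
 */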

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}
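
/*
 * Illustrative sketch, not part of the original header: a read-style
 * handler pushing kernel data out through whatever the iterator targets.
 * 'msg' and 'msg_len' are hypothetical driver state; copy_to_iter()
 * returns the number of bytes actually copied, which may be short.
 *
 *	static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t copied = copy_to_iter(msg, msg_len, to);
 *
 *		return copied ? copied : -EFAULT;
 *	}
 */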

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
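
/*
 * A minimal sketch of the check described in the comment above (variable
 * names are illustrative): callers needing the stricter flushcache
 * semantics must verify they are really available.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;             // only nocache semantics here
 *	copied = copy_from_iter_flushcache(dst, bytes, i);
 */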

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
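
/*
 * Illustrative sketch, not part of the original header: constructing a
 * kvec-backed iterator over a kernel buffer. With READ, the kvec is the
 * destination of subsequent copies (e.g. copy_to_iter() into it).
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter i;
 *
 *	iov_iter_kvec(&i, READ, &kv, 1, len);
 */
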
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it is an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it will just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than
 * the amount by which we shrank it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
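
/*
 * Illustrative sketch, not part of the original header: the usual
 * truncate/reexpand pairing - cap the iterator for a bounded operation,
 * then restore the unconsumed remainder afterwards.
 *
 *	size_t old = iov_iter_count(i);         // full size on entry
 *
 *	iov_iter_truncate(i, limit);            // cap at 'limit' bytes
 *	n = copy_to_iter(src, limit, i);        // consumes up to 'limit'
 *	iov_iter_reexpand(i, old - n);          // put back what is left
 */
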
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
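
/*
 * Illustrative sketch, not part of the original header: the common
 * readv()-style pattern around import_iovec(). 'uvec' and 'nr' are
 * assumed to come from userspace; the on-stack 'fast' array lets short
 * vectors avoid an allocation. import_iovec() sets *iov to NULL when the
 * fast array sufficed, so the kfree() is always safe.
 *
 *	struct iovec fast[UIO_FASTIOV], *iov = fast;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr, ARRAY_SIZE(fast), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(&iter);               // hypothetical consumer
 *	kfree(iov);
 */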

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
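
/*
 * Illustrative sketch, not part of the original header: a simple
 * accounting callback for iov_iter_for_each_range(), which hands the
 * callback each piece of the data as a kernel-addressable kvec.
 *
 *	static int count_range(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t seen = 0;
 *	int err = iov_iter_for_each_range(i, bytes, count_range, &seen);
 */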

#endif