/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SET_FS
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
#else /* CONFIG_SET_FS */
typedef struct {
	/* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX			TASK_SIZE
#endif

#define uaccess_kernel()		(false)
#define user_addr_max()			(TASK_SIZE_MAX)

static inline mm_segment_t force_uaccess_begin(void)
{
	return (mm_segment_t) { };
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
#endif /* CONFIG_SET_FS */
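
/*
 * Hedged usage sketch (not part of the original header): kernel code that
 * must reach real userspace while a set_fs(KERNEL_DS) override may still be
 * in effect would bracket the access like this:
 *
 *	mm_segment_t oldfs = force_uaccess_begin();
 *
 *	if (copy_to_user(ubuf, kbuf, len))	// ubuf/kbuf/len are hypothetical
 *		err = -EFAULT;
 *	force_uaccess_end(oldfs);
 */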

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

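/*
 * Illustrative sketch of the contract above (an assumed example, not any
 * real architecture's implementation): a byte-at-a-time raw_copy_from_user()
 * that returns the number of bytes left uncopied and never zero-pads:
 *
 *	static inline unsigned long
 *	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		char *d = to;
 *		const char __user *s = from;
 *
 *		while (n) {
 *			char c;
 *
 *			if (__get_user(c, s))
 *				break;		// fault: report what is left
 *			*d++ = c;
 *			s++;
 *			n--;
 *		}
 *		return n;
 *	}
 *
 * Real architectures use optimized word-sized or hardware-assisted copies;
 * this only makes the return-value contract concrete.
 */
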
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't end up taking a page fault and sleeping.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

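/*
 * Hedged usage note: unlike copy_from_user(), the __ variants above neither
 * check access_ok() nor zero-pad the destination on a short copy, so callers
 * must validate the range themselves and must check the return value, e.g.:
 *
 *	if (!access_ok(uptr, sizeof(buf)))	// uptr/buf are hypothetical
 *		return -EFAULT;
 *	if (__copy_from_user(buf, uptr, sizeof(buf)))
 *		return -EFAULT;	// buf may be partially written, never padded
 */
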
#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
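
/*
 * Hedged usage sketch (hypothetical driver code): the canonical
 * copy-in/copy-out pattern, e.g. in an ioctl handler.  Both functions return
 * the number of bytes *not* copied, so any non-zero result means -EFAULT:
 *
 *	struct foo_args args;			// foo_args/do_foo are made up
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	args.result = do_foo(&args);
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 *	return 0;
 */
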
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

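/*
 * Hedged usage sketch: callers reading from memory that may carry hardware
 * poison (e.g. pmem) use copy_mc_to_kernel() and treat a non-zero return
 * (bytes left uncopied) as data loss instead of a machine-check crash:
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *
 *	if (rem)
 *		return -EIO;	// short copy: source region was poisoned
 */
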
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

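/*
 * Hedged usage sketch: a fast path that must not sleep can disable the
 * pagefault handler around an atomic user access and fall back to a
 * sleeping path on failure:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, usrc, size);
 *	pagefault_enable();
 *	if (ret)
 *		ret = slow_path_copy(dst, usrc, size);	// hypothetical fallback
 */
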
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers.  Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler.  With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is effectively a NOP, so the
 * handler won't actually be disabled, and in_atomic() will report different
 * values depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

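/*
 * Hedged sketch (hypothetical arch code): a page fault handler would bail
 * out to its exception-fixup path when fault handling is not allowed:
 *
 *	if (faulthandler_disabled() || !current->mm)
 *		return do_fixup(regs, addr);	// do_fixup() is a placeholder
 */
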
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

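/*
 * Hedged usage sketch: the _nofault variants are for contexts where taking
 * a fault is not allowed (e.g. tracing or interrupt context); they fail
 * with -EFAULT instead of faulting in the page:
 *
 *	char buf[64];
 *
 *	if (copy_from_user_nofault(buf, user_ptr, sizeof(buf)))
 *		return;		// not readable right now; don't retry or sleep
 */
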
/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

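/*
 * Hedged usage sketch: probing a possibly bogus kernel pointer, e.g. while
 * dumping state from an oops path:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		pr_cont(" (bad address)");
 *	else
 *		pr_cont(" %016lx", word);
 */
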
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
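
/*
 * Hedged usage sketch: user_access_begin()/user_access_end() batch the
 * access_ok() check (and, on some architectures, toggling of hardware
 * protection such as SMAP) around a run of unsafe_*() accesses:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, &uptr->field, efault);	// field is hypothetical
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */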
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */