#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
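/*
 * All three variants below are implemented in assembly and return the
 * number of bytes that could NOT be copied (0 on success):
 *  - copy_user_enhanced_fast_string: "rep movsb" path for ERMS CPUs
 *  - copy_user_generic_string:       "rep movsq" plus a byte tail (REP_GOOD)
 *  - copy_user_generic_unrolled:     open-coded unrolled fallback loop
 */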
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

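/*
 * Dispatcher: the alternatives mechanism patches in a direct call to the
 * best implementation at boot time.  The explicit register constraints
 * mirror the C calling convention (%rdi, %rsi, %rdx in, %rax out), so the
 * patched-in call reaches the asm routines with the expected arguments.
 */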
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string,
			   X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string,
			   X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

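/*
 * raw_copy_from_user() - assumes access_ok() was already checked by the
 * caller.  Small compile-time-constant sizes are open-coded with
 * get_user-style asm inside an __uaccess_begin()/__uaccess_end() pair
 * (which lifts SMAP protection around the access); everything else falls
 * back to copy_user_generic().  Returns the number of bytes not copied.
 */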
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
				      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
				      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
				      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
					      (u16 __user *)(8 + (char __user *)src),
					      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
				      ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
					      (u64 __user *)(8 + (char __user *)src),
					      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

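/*
 * raw_copy_to_user() - mirror image of raw_copy_from_user(): access_ok()
 * is the caller's job, constant sizes use put_user-style asm, and the
 * return value is the number of bytes not copied.
 */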
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

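/*
 * raw_copy_in_user() - copy from one user buffer to another (used e.g. by
 * compat layers); both pointers are user addresses, so just go through
 * the generic copy routine.
 */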
static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}

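/*
 * __copy_user_nocache() - copy with non-temporal (cache-bypassing) stores
 * so large copies don't evict useful cache lines; implemented in asm.
 */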
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

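/*
 * Flushcache variants: copy and then write back/flush the destination
 * cache lines, used on the persistent-memory (pmem/DAX) write path.
 */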
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

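/*
 * copy_user_handle_tail() - fixup helper called after a faulting copy;
 * retries the remaining range byte by byte and returns the number of
 * bytes that still could not be copied.
 */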
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */