/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
        GET_THREAD_INFO(%rax)
        movq %rdi,%rcx
        addq %rdx,%rcx
        jc bad_to_user
        cmpq TI_addr_limit(%rax),%rcx
        ja bad_to_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
        GET_THREAD_INFO(%rax)
        movq %rsi,%rcx
        addq %rdx,%rcx
        jc bad_from_user
        cmpq TI_addr_limit(%rax),%rcx
        ja bad_from_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
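
/*
 * Note on the dispatch above: ALTERNATIVE_2 assembles the first
 * "jmp copy_user_generic_unrolled" inline and records the other two
 * jumps as boot-time alternatives. During boot, apply_alternatives()
 * patches the inline jump to the copy_user_generic_string variant
 * when the CPU advertises X86_FEATURE_REP_GOOD, and to the
 * copy_user_enhanced_fast_string variant when it advertises
 * X86_FEATURE_ERMS (the later alternative wins when both are set),
 * so choosing the variant costs nothing at runtime.
 */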

        .section .fixup,"ax"
        /* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
        movl %edx,%ecx
        xorl %eax,%eax
        rep
        stosb
bad_to_user:
        movl %edx,%eax
        ret
ENDPROC(bad_from_user)
        .previous
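
/*
 * The bad_* fixups run when the range check above fails or the
 * address + length addition overflows: bad_from_user first zeroes the
 * whole kernel destination with "rep stosb" (%rdi = destination,
 * count from %edx, %al = 0) so a failed copy_from_user() never leaves
 * the buffer with uninitialized contents, then falls through to
 * bad_to_user, which returns the full count in %eax as "bytes not
 * copied".
 */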

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
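
/*
 * Caller-side sketch (illustration only; the prototype is assumed to
 * match the declaration in asm/uaccess_64.h):
 *
 *      unsigned long copy_user_generic_unrolled(void *to, const void *from,
 *                                               unsigned len);
 *
 *      left = copy_user_generic_unrolled(dst, src, len);
 *      if (left)
 *              ;       /* the final 'left' bytes were not copied */
 */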
ENTRY(copy_user_generic_unrolled)
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz 17f
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movq %r8,(%rdi)
6:      movq %r9,1*8(%rdi)
7:      movq %r10,2*8(%rdi)
8:      movq %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movq %r8,4*8(%rdi)
14:     movq %r9,5*8(%rdi)
15:     movq %r10,6*8(%rdi)
16:     movq %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
17:     movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz 20f
18:     movq (%rsi),%r8
19:     movq %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xor %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
40:     leal (%rdx,%rcx,8),%edx
        jmp 60f
50:     movl %ecx,%edx
60:     jmp copy_user_handle_tail /* ecx is zerorest also */
        .previous

        _ASM_EXTABLE(1b,30b)
        _ASM_EXTABLE(2b,30b)
        _ASM_EXTABLE(3b,30b)
        _ASM_EXTABLE(4b,30b)
        _ASM_EXTABLE(5b,30b)
        _ASM_EXTABLE(6b,30b)
        _ASM_EXTABLE(7b,30b)
        _ASM_EXTABLE(8b,30b)
        _ASM_EXTABLE(9b,30b)
        _ASM_EXTABLE(10b,30b)
        _ASM_EXTABLE(11b,30b)
        _ASM_EXTABLE(12b,30b)
        _ASM_EXTABLE(13b,30b)
        _ASM_EXTABLE(14b,30b)
        _ASM_EXTABLE(15b,30b)
        _ASM_EXTABLE(16b,30b)
        _ASM_EXTABLE(18b,40b)
        _ASM_EXTABLE(19b,40b)
        _ASM_EXTABLE(21b,50b)
        _ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
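
/*
 * How the recovery above works: each numbered load/store that touches
 * user memory has an _ASM_EXTABLE(fault, fixup) entry, so a page
 * fault at that instruction resumes at the matching fixup instead of
 * oopsing. The fixups rebuild the number of bytes still to copy in
 * %edx: label 30 (fault in the 64-byte loop) adds 64 * the remaining
 * iteration count, label 40 (fault in the quadword loop) adds 8 * the
 * remaining quadword count, and label 50 (fault in the byte loop)
 * takes the byte count directly. All of them then jump to
 * copy_user_handle_tail, a C helper that retries the tail byte by
 * byte and returns the final uncopied count in %eax.
 */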

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page-sized chunks
 * even if user space passed a longer buffer.
 * Larger copies would also be dangerous because both Intel and AMD
 * have errata with rep movsq > 4GB. Anyone who feels the need to lift
 * this limit should take that into account.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
        ASM_STAC
        cmpl $8,%edx
        jb 2f           /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        shrl $3,%ecx
        andl $7,%edx
1:      rep
        movsq
2:      movl %edx,%ecx
3:      rep
        movsb
        xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
11:     leal (%rdx,%rcx,8),%ecx
12:     movl %ecx,%edx          /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,11b)
        _ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
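
/*
 * Recovery for the string version: a fault in "rep movsq" (label 1)
 * leaves the remaining quadword count in %ecx and the tail byte count
 * (< 8) in %edx, so label 11 recombines them as %edx + 8 * %ecx; a
 * fault in "rep movsb" (label 3) already has the remaining byte count
 * in %ecx. Either way the total lands in %edx before the jump to
 * copy_user_handle_tail.
 */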

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
        ASM_STAC
        movl %edx,%ecx
1:      rep
        movsb
        xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
12:     movl %ecx,%edx          /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
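
/*
 * With X86_FEATURE_ERMS the CPU promises that a single "rep movsb" is
 * efficient even for small and unaligned copies, so no unrolling or
 * alignment fixups are needed here. On a fault, %ecx still holds the
 * number of bytes rep movsb has not copied yet, which the fixup at
 * label 12 hands straight to copy_user_handle_tail.
 */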

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 */
ENTRY(__copy_user_nocache)
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz 17f
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movnti %r8,(%rdi)
6:      movnti %r9,1*8(%rdi)
7:      movnti %r10,2*8(%rdi)
8:      movnti %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movnti %r8,4*8(%rdi)
14:     movnti %r9,5*8(%rdi)
15:     movnti %r10,6*8(%rdi)
16:     movnti %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
17:     movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz 20f
18:     movq (%rsi),%r8
19:     movnti %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xorl %eax,%eax
        ASM_CLAC
        sfence
        ret

        .section .fixup,"ax"
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
40:     lea (%rdx,%rcx,8),%rdx
        jmp 60f
50:     movl %ecx,%edx
60:     sfence
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,30b)
        _ASM_EXTABLE(2b,30b)
        _ASM_EXTABLE(3b,30b)
        _ASM_EXTABLE(4b,30b)
        _ASM_EXTABLE(5b,30b)
        _ASM_EXTABLE(6b,30b)
        _ASM_EXTABLE(7b,30b)
        _ASM_EXTABLE(8b,30b)
        _ASM_EXTABLE(9b,30b)
        _ASM_EXTABLE(10b,30b)
        _ASM_EXTABLE(11b,30b)
        _ASM_EXTABLE(12b,30b)
        _ASM_EXTABLE(13b,30b)
        _ASM_EXTABLE(14b,30b)
        _ASM_EXTABLE(15b,30b)
        _ASM_EXTABLE(16b,30b)
        _ASM_EXTABLE(18b,40b)
        _ASM_EXTABLE(19b,40b)
        _ASM_EXTABLE(21b,50b)
        _ASM_EXTABLE(22b,50b)
ENDPROC(__copy_user_nocache)
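
/*
 * __copy_user_nocache uses ordinary loads but non-temporal movnti
 * stores, so the destination data bypasses the cache instead of
 * evicting useful lines. Non-temporal stores are weakly ordered,
 * which is why both the success path and the fault fixup execute
 * sfence before leaving: it orders the movnti stores ahead of any
 * later stores from this CPU.
 */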