/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	mov PER_CPU_VAR(current_task), %rax
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TASK_addr_limit(%rax),%rcx
	ja bad_to_user
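	/*
	 * Tail-call one of the three copy variants below.  ALTERNATIVE_2
	 * emits the first jmp as the compile-time default; at boot the
	 * alternatives patching rewrites it to the "string" variant when
	 * X86_FEATURE_REP_GOOD is set, or to the ERMS variant when
	 * X86_FEATURE_ERMS is set (ERMS wins if both are present).
	 */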
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
EXPORT_SYMBOL(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	mov PER_CPU_VAR(current_task), %rax
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TASK_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
EXPORT_SYMBOL(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
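	/*
	 * Reached when the user range check fails: nothing has been
	 * copied yet, so zero the whole destination buffer (%rdx bytes
	 * at %rdi) and report the full count as uncopied in %eax.
	 */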
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
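/*
 * Structure: after ALIGN_DESTINATION (a shared macro that byte-copies
 * up to 7 bytes so %rdi becomes 8-byte aligned), copy 64-byte chunks
 * with an unrolled 4x8-byte loop, then the remaining 8-byte words,
 * then the remaining bytes.  Each potentially faulting access carries
 * a numeric label that the _ASM_EXTABLE entries below map to a fixup
 * which recomputes the number of bytes still uncopied.
 */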
ENTRY(copy_user_generic_unrolled)
	ASM_STAC
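	/*
	 * ASM_STAC sets EFLAGS.AC and ASM_CLAC clears it again: with SMAP
	 * enabled, the kernel may access user pages only between the two.
	 * Both are alternatives that become real stac/clac instructions
	 * only on CPUs with X86_FEATURE_SMAP.
	 */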
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
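	/*
	 * Each fixup rebuilds the uncopied byte count in %edx from the
	 * loop counter that was live at the faulting label:
	 *   30: fault in the 64-byte loop, %edx = 64*%ecx + tail in %edx
	 *   40: fault in the 8-byte loop,  %edx =  8*%ecx + tail in %edx
	 *   50: fault in the byte loop,    %edx = %ecx
	 * copy_user_handle_tail then retries the tail byte by byte and
	 * returns how many bytes could not be copied.
	 */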
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to lift
 * this limit, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
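	/*
	 * 11: fault during "rep movsq": %rcx quadwords (including the
	 *     faulting one) plus the byte tail in %edx remain, so the
	 *     uncopied count is 8*%ecx + %edx.
	 * 12: fault during "rep movsb": %ecx bytes remain.
	 */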
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)

/*
 * Some CPUs support enhanced REP MOVSB/STOSB (ERMS) instructions.
 * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
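/*
 * Note: only %edx (the low 32 bits of the count) is used, so the same
 * 4GB limit as the string variant above applies.  This entry point is
 * reached via the ALTERNATIVE_2 dispatch when X86_FEATURE_ERMS is set.
 */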
ENTRY(copy_user_enhanced_fast_string)
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)

/*
 * __copy_user_nocache - Uncached memory copy with exception handling.
 * This uses non-temporal stores so the destination is not pulled into
 * the cache, for more performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 * - Require 8-byte alignment when size is 8 bytes or larger.
 * - Require 4-byte alignment when size is 4 bytes.
 */
ENTRY(__copy_user_nocache)
	ASM_STAC

	/* If size is less than 8 bytes, go to 4-byte copy */
	cmpl $8,%edx
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */
	ALIGN_DESTINATION

	/* Set 4x8-byte copy count and remainder */
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:	movq (%rsi),%r8
21:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz .L_8b_nocache_copy_loop

	/* If no byte left, we're done */
.L_4b_nocache_copy_entry:
	andl %edx,%edx
	jz .L_finish_copy

	/* If destination is not 4-byte aligned, go to byte copy: */
	movl %edi,%ecx
	andl $3,%ecx
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	movl %edx,%ecx
	andl $3,%edx
	shrl $2,%ecx
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
30:	movl (%rsi),%r8d
31:	movnti %r8d,(%rdi)
	leaq 4(%rsi),%rsi
	leaq 4(%rdi),%rdi

	/* If no bytes left, we're done: */
	andl %edx,%edx
	jz .L_finish_copy

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
	movl %edx,%ecx
.L_1b_cache_copy_loop:
40:	movb (%rsi),%al
41:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */
.L_finish_copy:
	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
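	/*
	 * Fixups mirror the copy stages above: convert the live counter
	 * into uncopied bytes (chunks*64, qwords*8, dwords*4, or plain
	 * bytes) and hand off to copy_user_handle_tail.  The sfence is
	 * presumably there to order the non-temporal stores already
	 * issued before the tail is reprocessed with cached accesses.
	 */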
.L_fixup_4x8b_copy:
	shll $6,%ecx
	addl %ecx,%edx
	jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
	movl %ecx,%edx
.L_fixup_handle_tail:
	sfence
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
	_ASM_EXTABLE(20b,.L_fixup_8b_copy)
	_ASM_EXTABLE(21b,.L_fixup_8b_copy)
	_ASM_EXTABLE(30b,.L_fixup_4b_copy)
	_ASM_EXTABLE(31b,.L_fixup_4b_copy)
	_ASM_EXTABLE(40b,.L_fixup_1b_copy)
	_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)