/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

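/*
 * Note: memset is declared weak below so that a strong definition elsewhere
 * in the kernel (for example an instrumented variant such as KASAN's) can
 * take precedence at link time; otherwise this implementation is used.
 */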
.weak memset

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string instructions to get better performance than the original function.
 * The code is simpler and shorter than the original function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
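/*
 * The register assignments above follow the standard C prototype
 * void *memset(void *s, int c, size_t n) under the System V AMD64 calling
 * convention: the first three arguments arrive in %rdi, %rsi and %rdx, and
 * the return value (the original destination) comes back in %rax.
 */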
ENTRY(memset)
ENTRY(__memset)
	/*
	 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) feature. It is
	 * recommended to use it when available. If it is not available, fall
	 * back to the fast string instructions.
	 *
	 * Otherwise, use the original memset function.
	 */
	ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memset_erms", X86_FEATURE_ERMS

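	/*
	 * Rough sketch of the ALTERNATIVE_2 site above after the alternatives
	 * framework patches it at boot (assuming the usual ALTERNATIVE_2
	 * semantics, nothing specific to this file): with neither feature set,
	 * "jmp memset_orig" stays in place; with X86_FEATURE_REP_GOOD the jump
	 * is padded out so execution falls through to the fast string code
	 * below; with X86_FEATURE_ERMS the site becomes "jmp memset_erms".
	 */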
	movq %rdi,%r9
	movq %rdx,%rcx
	andl $7,%edx
	shrq $3,%rcx
	/* expand byte value */
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	imulq %rsi,%rax
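	/*
	 * Worked example of the expansion above: for a fill byte of 0xab,
	 * 0xab * 0x0101010101010101 = 0xabababababababab, i.e. the byte is
	 * replicated into all eight lanes of %rax. %rcx already holds the
	 * number of 8-byte words (count >> 3) for "rep stosq" below, and
	 * %edx keeps the remaining count & 7 trailing bytes.
	 */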
	rep stosq
	movl %edx,%ecx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * enhanced rep stosb to override the fast string function.
 * The code is simpler and shorter than the fast string function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
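/*
 * X86_FEATURE_ERMS ("enhanced REP MOVSB/STOSB") is enumerated by CPUID; on
 * such CPUs a single "rep stosb" is intended to be fast for arbitrary counts
 * and alignments, so no separate word or tail handling is needed here.
 */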
ENTRY(memset_erms)
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset_erms)

ENTRY(memset_orig)
	movq %rdi,%r10

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq %rcx,%rax

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
	jnz .Lbad_alignment
.Lafter_bad_alignment:

	movq %rdx,%rcx
	shrq $6,%rcx
	jz .Lhandle_tail

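	/*
	 * Main loop: %rcx was set to count >> 6 above, so each iteration
	 * below stores one 64-byte block with eight 8-byte moves before
	 * advancing %rdi.
	 */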
	.p2align 4
.Lloop_64:
	decq %rcx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi
	jnz .Lloop_64

	/*
	 * Handle tail in loops. The loops should be faster than
	 * hard to predict jump tables.
	 */
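	/*
	 * For example, with count = 200: the 64-byte loop above stores
	 * 3 * 64 = 192 bytes, .Lloop_8 stores the (200 & 63) & ~7 = 8
	 * remaining aligned bytes, and .Lloop_1 stores the final
	 * 200 & 7 = 0 bytes.
	 */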
	.p2align 4
.Lhandle_tail:
	movl %edx,%ecx
	andl $63&(~7),%ecx
	jz .Lhandle_7
	shrl $3,%ecx
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	andl $7,%edx
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %edx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax
	ret

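	/*
	 * Destination not 8-byte aligned: one unaligned 8-byte store covers
	 * the first few bytes, then %rdi and the count are advanced by
	 * 8 - (dst & 7) so the main path resumes on an aligned boundary.
	 * Counts of 7 bytes or fewer skip straight to the byte loop.
	 */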
.Lbad_alignment:
	cmpq $7,%rdx
	jbe .Lhandle_7
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)