;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;; Optimized pq check of N source vectors using SSE3
;;; int pq_check_sse(int vects, int len, void **array)

;;; Checks the P+Q parity over N (vects-2) sources given in the array of
;;; pointers (**array). The last two pointers are the P and Q parity vectors
;;; to verify. Vectors must be aligned to 16 bytes. Length must be 16 byte
;;; aligned.

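;;; Expected layout of **array (vects pointers in total):
;;;   array[0] .. array[vects-3]    source vectors
;;;   array[vects-2]                P parity vector
;;;   array[vects-1]                Q parity vector
;;; Returns 0 when the stored P and Q match the recomputed parity, 1 otherwise.
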
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define return rax
 %define PS 8
 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE

%elifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define return rax
 %define PS 8
 %define tmp    r11
 %define stack_size 2*16 + 8    ; must be an odd multiple of 8
 %define func(x) proc_frame x

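 ; xmm6 and xmm7 are callee-saved in the Windows x64 ABI, so FUNC_SAVE
 ; spills them to the local stack frame and FUNC_RESTORE reloads them.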
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        save_xmm128     xmm6, 0*16
        save_xmm128     xmm7, 1*16
        end_prolog
 %endmacro
 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp + 0*16]
        movdqa  xmm7, [rsp + 1*16]
        add     rsp, stack_size
 %endmacro


%elifidn __OUTPUT_FORMAT__, elf32
 %define arg0   edx
 %define arg1   ecx
 %define return eax
 %define PS 4
 %define func(x) x:
 %define arg(x) [ebp+8+PS*x]
 %define arg2   edi             ; must save/restore
 %define arg3   esi
 %define tmp    ebx

 %macro FUNC_SAVE 0
        push    ebp
        mov     ebp, esp
        push    esi
        push    edi
        push    ebx
        mov     arg0, arg(0)
        mov     arg1, arg(1)
        mov     arg2, arg(2)
 %endmacro

 %macro FUNC_RESTORE 0
        pop     ebx
        pop     edi
        pop     esi
        mov     esp, ebp        ; restore stack pointer from frame pointer
        pop     ebp
 %endmacro

%endif  ; output formats

%define vec   arg0
%define len   arg1
%define ptr   arg3
%define pos   return

%define xp1   xmm0
%define xq1   xmm1
%define xtmp1 xmm2
%define xs1   xmm3

%define xp2   xmm4
%define xq2   xmm5
%define xtmp2 xmm6
%define xs2   xmm7

%ifidn PS,8     ; 64-bit code
 default rel
 [bits 64]
 %define xpoly xmm15
%elifidn PS,4   ; 32-bit code
 %define xpoly [poly]
%endif
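
;;; xpoly holds the GF(2^8) reduction constant 0x1d replicated in every byte
;;; (see the poly constant in the .data section); it is XORed into Q whenever
;;; a byte overflows during the multiply-by-2 step in the loops below.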

;;; Use non-temporal load/store
%ifdef NO_NT_LDST
 %define XLDR movdqa
 %define XSTR movdqa
%else
 %define XLDR movntdqa
 %define XSTR movntdq
%endif
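
;;; Note: movntdqa/movntdq require 16-byte aligned addresses, hence the
;;; 16-byte alignment requirement on the vectors stated in the header.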

section .text

align 16
global pq_check_sse:ISAL_SYM_TYPE_FUNCTION
func(pq_check_sse)
        FUNC_SAVE
        sub     vec, 3                    ;Keep as offset to last source
        jng     return_fail               ;Must have at least 2 sources
        cmp     len, 0
        je      return_pass
        test    len, (16-1)               ;Check alignment of length
        jnz     return_fail
        mov     pos, 0
%ifidn PS,8
        movdqa  xpoly, [poly]             ;For 64-bit, load poly into high xmm reg
%endif
        cmp     len, 32
        jl      loop16

len_aligned_32bytes:
        sub     len, 32                   ;Do end of vec first and run backward

loop32:
        mov     ptr, [arg2+PS+vec*PS]     ;Get address of P parity vector
        mov     tmp, [arg2+(2*PS)+vec*PS] ;Get address of Q parity vector
        XLDR    xp1, [ptr+pos]            ;Initialize xp1 with P1 src
        XLDR    xp2, [ptr+pos+16]         ;Initialize xp2 with P2 src + 16B ahead
        pxor    xq1, xq1                  ;q1 = 0
        pxor    xq2, xq2                  ;q2 = 0

        mov     ptr, [arg2+vec*PS]        ;Fetch last source pointer
        mov     tmp, vec                  ;Set tmp to point back to last vector
        XLDR    xs1, [ptr+pos]            ;Preload last vector (source)
        XLDR    xs2, [ptr+pos+16]         ;Preload last vector (source)

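        ;; Inner loop: each pass folds one source vector into the running
        ;; parity:  p ^= s  and  q = (q ^ s)*2 in GF(2^8) (Horner's rule for
        ;; the Q syndrome).  The multiply-by-2 is done per byte: pcmpgtb
        ;; builds a 0xff mask for bytes with bit 7 set, pand turns that mask
        ;; into the 0x1d reduction constant, paddb shifts each byte left by
        ;; one, and pxor applies the conditional reduction.  Source index 0
        ;; is folded in after the loop without the final multiply.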
next_vect:
        sub     tmp, 1                    ; Inner loop for each source vector
        mov     ptr, [arg2+tmp*PS]        ; get pointer to next vect
        pxor    xp1, xs1                  ; p1 ^= s1
        pxor    xp2, xs2                  ; p2 ^= s2
        pxor    xq1, xs1                  ; q1 ^= s1
        pxor    xq2, xs2                  ; q2 ^= s2
        pxor    xtmp1, xtmp1              ; xtmp1 = 0 - for compare to 0
        pxor    xtmp2, xtmp2              ; xtmp2 = 0
        pcmpgtb xtmp1, xq1                ; xtmp1 = mask 0xff or 0x00 if bit7 set
        pcmpgtb xtmp2, xq2                ; xtmp2 = mask 0xff or 0x00 if bit7 set
        pand    xtmp1, xpoly              ; xtmp1 = poly or 0x00
        pand    xtmp2, xpoly              ; xtmp2 = poly or 0x00
        XLDR    xs1, [ptr+pos]            ; Get next vector (source data1)
        XLDR    xs2, [ptr+pos+16]         ; Get next vector (source data2)
        paddb   xq1, xq1                  ; q1 = q1<<1
        paddb   xq2, xq2                  ; q2 = q2<<1
        pxor    xq1, xtmp1                ; q1 = q1<<1 ^ poly_masked
        pxor    xq2, xtmp2                ; q2 = q2<<1 ^ poly_masked
        jg      next_vect                 ; Loop for each vect except 0

        pxor    xp1, xs1                  ;p1 ^= s1[0] - last source is already loaded
        pxor    xq1, xs1                  ;q1 ^= 1 * s1[0]
        pxor    xp2, xs2                  ;p2 ^= s2[0]
        pxor    xq2, xs2                  ;q2 ^= 1 * s2[0]

        mov     tmp, [arg2+(2*PS)+vec*PS] ;Get address of Q parity vector
        XLDR    xtmp1, [tmp+pos]          ;Load saved Q1 parity into xtmp1
        XLDR    xtmp2, [tmp+pos+16]       ;Load saved Q2 parity (+16B) into xtmp2

        pxor    xq1, xtmp1                ;xq1 = q1 calculated ^ q1 saved
        pxor    xq2, xtmp2                ;xq2 = q2 calculated ^ q2 saved

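        ;; All four accumulators should be zero if the stored parity is
        ;; valid; OR them together and use ptest (sets ZF only when the
        ;; result is all zero) to check.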
        por     xp1, xq1                  ;Confirm that all P&Q parity are 0
        por     xp1, xp2
        por     xp1, xq2
        ptest   xp1, xp1
        jnz     return_fail
        add     pos, 32
        cmp     pos, len
        jle     loop32


        ;; ------------------------------
        ;; Do last 16 bytes remaining
        add     len, 32
        cmp     pos, len
        je      return_pass

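        ;; Same algorithm as loop32 above, but one xmm register (16 bytes)
        ;; per iteration; handles lengths below 32 bytes and the final
        ;; 16-byte remainder of len.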
loop16:
        mov     ptr, [arg2+PS+vec*PS]     ;Get address of P parity vector
        mov     tmp, [arg2+(2*PS)+vec*PS] ;Get address of Q parity vector
        XLDR    xp1, [ptr+pos]            ;Initialize xp1 with P1 src
        pxor    xq1, xq1                  ;q = 0
        mov     ptr, [arg2+vec*PS]        ;Fetch last source pointer
        mov     tmp, vec                  ;Set tmp to point back to last vector
        XLDR    xs1, [ptr+pos]            ;Preload last vector (source)

next_vect16:
        sub     tmp, 1                    ; Inner loop for each source vector
        mov     ptr, [arg2+tmp*PS]        ; get pointer to next vect
        pxor    xq1, xs1                  ; q ^= s
        pxor    xtmp1, xtmp1              ; xtmp = 0
        pcmpgtb xtmp1, xq1                ; xtmp = mask 0xff or 0x00 if bit7 set
        pand    xtmp1, xpoly              ; xtmp = poly or 0x00
        pxor    xp1, xs1                  ; p ^= s
        paddb   xq1, xq1                  ; q = q<<1
        pxor    xq1, xtmp1                ; q = q<<1 ^ poly_masked
        XLDR    xs1, [ptr+pos]            ; Get next vector (source data)
        jg      next_vect16               ; Loop for each vect except 0

        pxor    xp1, xs1                  ;p ^= s[0] - last source is already loaded
        pxor    xq1, xs1                  ;q ^= 1 * s[0]

        mov     tmp, [arg2+(2*PS)+vec*PS] ;Get address of Q parity vector
        XLDR    xtmp1, [tmp+pos]          ;Load saved Q1 parity into xtmp1
        pxor    xq1, xtmp1                ;xq1 = q1 calculated ^ q1 saved

        por     xp1, xq1                  ;Confirm that all P&Q parity are 0
        ptest   xp1, xp1
        jnz     return_fail
        add     pos, 16
        cmp     pos, len
        jl      loop16


return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret


return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data

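;;; GF(2^8) reduction constant: 0x1d is the low byte of the RAID-6 field
;;; polynomial x^8 + x^4 + x^3 + x^2 + 1, replicated across all 16 bytes.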
align 16
poly:
dq 0x1d1d1d1d1d1d1d1d, 0x1d1d1d1d1d1d1d1d

;;;       func          core, ver, snum
slversion pq_check_sse, 00,   06,  0033