;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_vect_mad_avx(len, vec, vec_i, mul_array, src, dest);
;;;

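;;; Arguments (register mapping per ABI below; semantics as used by this
;;; routine - see the isa-l erasure-code headers for the authoritative
;;; prototype):
;;;   len       - length of the region to process, in bytes (must be >= 16)
;;;   vec       - number of source vectors in the code matrix (not referenced here)
;;;   vec_i     - index of this source vector; selects a 32-byte slice of mul_array
;;;   mul_array - GF(2^8) multiply tables, 32 bytes per source vector
;;;   src       - source buffer
;;;   dest      - destination buffer, updated in place (dest[i] ^= C*src[i] in GF(2^8))
;;; Returns 0 on success, 1 if len < 16.
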
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define return   rax
 %define return.w eax
 %define PS 8
 %define stack_size 16*3 + 3*8
 %define arg(x)  [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

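;;; stack_size reserves three 16-byte XMM save slots plus three 8-byte slots:
;;; two for r12/r15 and one of apparent padding, which keeps rsp 16-byte
;;; aligned for the aligned vmovdqa saves below.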
%macro FUNC_SAVE 0
	sub	rsp, stack_size
	vmovdqa	[rsp+16*0], xmm6
	vmovdqa	[rsp+16*1], xmm7
	vmovdqa	[rsp+16*2], xmm8
	save_reg	r12, 3*16 + 0*8
	save_reg	r15, 3*16 + 1*8
	end_prolog
	mov	arg4, arg(4)
	mov	arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
	vmovdqa	xmm6, [rsp+16*0]
	vmovdqa	xmm7, [rsp+16*1]
	vmovdqa	xmm8, [rsp+16*2]
	mov	r12, [rsp + 3*16 + 0*8]
	mov	r15, [rsp + 3*16 + 1*8]
	add	rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define return   rax
 %define return.w eax

 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

;;; gf_vect_mad_avx(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest      arg5
%define pos       return
%define pos.w     return.w

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Addresses are aligned: use non-temporal load/store unless NO_NT_LDST is set
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif


default rel

[bits 64]
section .text

%define xmask0f xmm8
%define xgft_lo xmm7
%define xgft_hi xmm6

%define x0      xmm0
%define xtmpa   xmm1
%define xtmph   xmm2
%define xtmpl   xmm3
%define xd      xmm4
%define xtmpd   xmm5

align 16
global gf_vect_mad_avx:ISAL_SYM_TYPE_FUNCTION
func(gf_vect_mad_avx)
	FUNC_SAVE
	sub	len, 16
	jl	.return_fail

	xor	pos, pos
	vmovdqa	xmask0f, [mask0f]	;Load mask of lower nibble in each byte

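	;; Each source vector owns a 32-byte slice of mul_array:
	;; 16 products Cx{00..0f} (low-nibble table) followed by
	;; 16 products Cx{00,10,..,f0} (high-nibble table), hence vec_i*32.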
	sal	vec_i, 5		;Multiply by 32
	vmovdqu	xgft_lo, [vec_i+mul_array]	;Load array Cx{00}, Cx{01}, Cx{02}, ...
	vmovdqu	xgft_hi, [vec_i+mul_array+16]	; " Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}

	XLDR	xtmpd, [dest+len]	;backup the last 16 bytes in dest

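	;; Main loop: per 16-byte block this computes, for each byte,
	;;     dest[i] ^= C * src[i]   (multiplication in GF(2^8))
	;; The multiply is split into two vpshufb table lookups: the low
	;; nibble of each source byte indexes xgft_lo and the high nibble
	;; indexes xgft_hi; XORing the two partial products gives the full
	;; product, which is then XORed into dest (multiply-and-add).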
.loop16:
	XLDR	xd, [dest+pos]		;Get next dest vector
.loop16_overlap:
	XLDR	x0, [src+pos]		;Get next source vector

	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0

	vpshufb	xtmph, xgft_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph, xtmph, xtmpl	;GF add high and low partials
	vpxor	xd, xd, xtmph		;xd += partial

	XSTR	[dest+pos], xd
	add	pos, 16			;Loop on 16 bytes at a time
	cmp	pos, len
	jle	.loop16

	lea	tmp, [len + 16]
	cmp	pos, tmp
	je	.return_pass

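	;; Lengths that are not a multiple of 16 finish by redoing the last
	;; 16-byte window at offset len (= original length - 16). xd is
	;; restored from xtmpd, the copy of dest's final 16 bytes saved
	;; before the loop, so bytes already updated by the main loop are
	;; recomputed from their original values rather than updated twice.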
	;; Tail len
	mov	pos, len		;Overlapped offset length-16
	vmovdqa	xd, xtmpd		;Restore xd
	jmp	.loop16_overlap		;Do one more overlap pass

.return_pass:
	mov	return, 0
	FUNC_RESTORE
	ret

.return_fail:
	mov	return, 1
	FUNC_RESTORE
	ret

endproc_frame

section .data

align 16

mask0f:	dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;;       func             core, ver, snum
slversion gf_vect_mad_avx, 02,   01,  0201