/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, AES-NI/AVX optimized.
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <asm/frame.h>

#define rRIP         (%rip)

#define RX0          %xmm0
#define RX1          %xmm1
#define MASK_4BIT    %xmm2
#define RTMP0        %xmm3
#define RTMP1        %xmm4
#define RTMP2        %xmm5
#define RTMP3        %xmm6
#define RTMP4        %xmm7

#define RA0          %xmm8
#define RA1          %xmm9
#define RA2          %xmm10
#define RA3          %xmm11

#define RB0          %xmm12
#define RB1          %xmm13
#define RB2          %xmm14
#define RB3          %xmm15

#define RNOT         %xmm0
#define RBSWAP       %xmm1


/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
        vpunpckhdq x1, x0, t2;  \
        vpunpckldq x1, x0, x0;  \
                                \
        vpunpckldq x3, x2, t1;  \
        vpunpckhdq x3, x2, x2;  \
                                \
        vpunpckhqdq t1, x0, x1; \
        vpunpcklqdq t1, x0, x0; \
                                \
        vpunpckhqdq x2, t2, x3; \
        vpunpcklqdq x2, t2, x2;

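/*
 * After transpose_4x4(), register xN holds the Nth 32-bit word of each of
 * the four blocks (x0 = {a0, b0, c0, d0}, x1 = {a1, b1, c1, d1}, ...), so
 * every step of the round function below operates on four blocks at once.
 */
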
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
        vpand x, mask4bit, tmp0;  \
        vpandn x, mask4bit, x;    \
        vpsrld $4, x, x;          \
                                  \
        vpshufb tmp0, lo_t, tmp0; \
        vpshufb x, hi_t, x;       \
        vpxor tmp0, x, x;
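
/*
 * transform_pre()/transform_post() evaluate a byte-wise affine transform by
 * splitting every byte into nibbles and using vpshufb as a 16-entry lookup:
 * roughly, per byte, result = lo_t[x & 0x0f] ^ hi_t[x >> 4].
 */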

/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction.
 */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
        vpandn mask4bit, x, tmp0; \
        vpsrld $4, x, x;          \
        vpand x, mask4bit, x;     \
                                  \
        vpshufb tmp0, lo_t, tmp0; \
        vpshufb x, hi_t, x;       \
        vpxor tmp0, x, x;


.section .rodata.cst164, "aM", @progbits, 164
.align 16

/*
 * Following four affine transform look-up tables are from work by
 * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * These allow exposing SM4 S-Box from AES SubByte.
 */

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
        .quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
        .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
        .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
        .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
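
/*
 * Together with AESENCLAST these tables give, per byte (sketch):
 *   sm4_sbox(x) = post_affine(aes_subbyte(pre_affine(x)))
 * i.e. the SM4 S-box is computed through the AES S-box by converting the
 * field representation before and after SubBytes.
 */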

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
        .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
        .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
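
/*
 * AESENCLAST performs ShiftRows, SubBytes and an XOR with its round-key
 * operand. SubBytes acts on each byte independently, so shuffling the
 * result with this inverse ShiftRows pattern afterwards leaves pure
 * SubBytes (the _rol_N variants below fold a 32-bit word rotation into the
 * same vpshufb). The XOR with MASK_4BIT, which is passed as the round key,
 * is compensated for in transform_post().
 */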

/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
        .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
        .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
        .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
        .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
        .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
        .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
        .long 0x0f0f0f0f


.text
.align 16

/*
 * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx_crypt4)
        /* input:
         *   %rdi: round key array, CTX
         *   %rsi: dst (1..4 blocks)
         *   %rdx: src (1..4 blocks)
         *   %rcx: num blocks (1..4)
         */
        FRAME_BEGIN

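        /*
         * Load 1..4 blocks: block 0 is copied into the remaining registers
         * first so the transpose below always sees defined data, then
         * blocks 1..3 are loaded when present.
         */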
        vmovdqu 0*16(%rdx), RA0;
        vmovdqa RA0, RA1;
        vmovdqa RA0, RA2;
        vmovdqa RA0, RA3;
        cmpq $2, %rcx;
        jb .Lblk4_load_input_done;
        vmovdqu 1*16(%rdx), RA1;
        je .Lblk4_load_input_done;
        vmovdqu 2*16(%rdx), RA2;
        cmpq $3, %rcx;
        je .Lblk4_load_input_done;
        vmovdqu 3*16(%rdx), RA3;

.Lblk4_load_input_done:

        vmovdqa .Lbswap32_mask rRIP, RTMP2;
        vpshufb RTMP2, RA0, RA0;
        vpshufb RTMP2, RA1, RA1;
        vpshufb RTMP2, RA2, RA2;
        vpshufb RTMP2, RA3, RA3;

        vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
        vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
        vmovdqa .Lpre_tf_hi_s rRIP, RB0;
        vmovdqa .Lpost_tf_lo_s rRIP, RB1;
        vmovdqa .Lpost_tf_hi_s rRIP, RB2;
        vmovdqa .Linv_shift_row rRIP, RB3;
        vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
        vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
        transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);

#define ROUND(round, s0, s1, s2, s3) \
        vbroadcastss (4*(round))(%rdi), RX0; \
        vpxor s1, RX0, RX0; \
        vpxor s2, RX0, RX0; \
        vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
        \
        /* sbox, non-linear part */ \
        transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \
        vaesenclast MASK_4BIT, RX0, RX0; \
        transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \
        \
        /* linear part */ \
        vpshufb RB3, RX0, RTMP0; \
        vpxor RTMP0, s0, s0; /* s0 ^ x */ \
        vpshufb RTMP2, RX0, RTMP1; \
        vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
        vpshufb RTMP3, RX0, RTMP1; \
        vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
        vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \
        vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
        vpslld $2, RTMP0, RTMP1; \
        vpsrld $30, RTMP0, RTMP0; \
        vpxor RTMP0, s0, s0; \
        /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
        vpxor RTMP1, s0, s0;
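
/*
 * Each ROUND() applies one SM4 round to four blocks at once; per block it
 * computes, roughly:
 *   x  = s1 ^ s2 ^ s3 ^ rk[round];
 *   x  = sm4_sbox(x);              (via AES-NI, see above)
 *   s0 ^= x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
 * Successive ROUND()s rotate the roles of s0..s3, and the loop below runs
 * the four-round group eight times for SM4's 32 rounds.
 */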

        leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk4:
        ROUND(0, RA0, RA1, RA2, RA3);
        ROUND(1, RA1, RA2, RA3, RA0);
        ROUND(2, RA2, RA3, RA0, RA1);
        ROUND(3, RA3, RA0, RA1, RA2);
        leaq (4*4)(%rdi), %rdi;
        cmpq %rax, %rdi;
        jne .Lroundloop_blk4;

#undef ROUND

        vmovdqa .Lbswap128_mask rRIP, RTMP2;

        transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
        vpshufb RTMP2, RA0, RA0;
        vpshufb RTMP2, RA1, RA1;
        vpshufb RTMP2, RA2, RA2;
        vpshufb RTMP2, RA3, RA3;

        vmovdqu RA0, 0*16(%rsi);
        cmpq $2, %rcx;
        jb .Lblk4_store_output_done;
        vmovdqu RA1, 1*16(%rsi);
        je .Lblk4_store_output_done;
        vmovdqu RA2, 2*16(%rsi);
        cmpq $3, %rcx;
        je .Lblk4_store_output_done;
        vmovdqu RA3, 3*16(%rsi);

.Lblk4_store_output_done:
        vzeroall;
        FRAME_END
        ret;
SYM_FUNC_END(sm4_aesni_avx_crypt4)

.align 8
SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
        /* input:
         *   %rdi: round key array, CTX
         *   RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
         *                                           plaintext blocks
         * output:
         *   RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
         *                                           ciphertext blocks
         */
        FRAME_BEGIN

        vmovdqa .Lbswap32_mask rRIP, RTMP2;
        vpshufb RTMP2, RA0, RA0;
        vpshufb RTMP2, RA1, RA1;
        vpshufb RTMP2, RA2, RA2;
        vpshufb RTMP2, RA3, RA3;
        vpshufb RTMP2, RB0, RB0;
        vpshufb RTMP2, RB1, RB1;
        vpshufb RTMP2, RB2, RB2;
        vpshufb RTMP2, RB3, RB3;

        vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
        transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
        transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
        vbroadcastss (4*(round))(%rdi), RX0; \
        vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \
        vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \
        vmovdqa RX0, RX1; \
        vpxor s1, RX0, RX0; \
        vpxor s2, RX0, RX0; \
        vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
        vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \
        vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \
        vpxor r1, RX1, RX1; \
        vpxor r2, RX1, RX1; \
        vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
        \
        /* sbox, non-linear part */ \
        transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
        transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
        vmovdqa .Linv_shift_row rRIP, RTMP4; \
        vaesenclast MASK_4BIT, RX0, RX0; \
        vaesenclast MASK_4BIT, RX1, RX1; \
        transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
        transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
        \
        /* linear part */ \
        vpshufb RTMP4, RX0, RTMP0; \
        vpxor RTMP0, s0, s0; /* s0 ^ x */ \
        vpshufb RTMP4, RX1, RTMP2; \
        vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \
        vpxor RTMP2, r0, r0; /* r0 ^ x */ \
        vpshufb RTMP4, RX0, RTMP1; \
        vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
        vpshufb RTMP4, RX1, RTMP3; \
        vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \
        vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
        vpshufb RTMP4, RX0, RTMP1; \
        vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
        vpshufb RTMP4, RX1, RTMP3; \
        vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \
        vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
        vpshufb RTMP4, RX0, RTMP1; \
        vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
        /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
        vpslld $2, RTMP0, RTMP1; \
        vpsrld $30, RTMP0, RTMP0; \
        vpxor RTMP0, s0, s0; \
        vpxor RTMP1, s0, s0; \
        vpshufb RTMP4, RX1, RTMP3; \
        vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
        /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
        vpslld $2, RTMP2, RTMP3; \
        vpsrld $30, RTMP2, RTMP2; \
        vpxor RTMP2, r0, r0; \
        vpxor RTMP3, r0, r0;
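
/*
 * Same SM4 round as in sm4_aesni_avx_crypt4 above, but evaluated for two
 * groups of four blocks (the RA* and RB* registers) at once: the RX0 and
 * RX1 streams are interleaved so the two groups' instructions can overlap.
 */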

        leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
        ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
        ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
        ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
        ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
        leaq (4*4)(%rdi), %rdi;
        cmpq %rax, %rdi;
        jne .Lroundloop_blk8;

#undef ROUND

        vmovdqa .Lbswap128_mask rRIP, RTMP2;

        transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
        transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
        vpshufb RTMP2, RA0, RA0;
        vpshufb RTMP2, RA1, RA1;
        vpshufb RTMP2, RA2, RA2;
        vpshufb RTMP2, RA3, RA3;
        vpshufb RTMP2, RB0, RB0;
        vpshufb RTMP2, RB1, RB1;
        vpshufb RTMP2, RB2, RB2;
        vpshufb RTMP2, RB3, RB3;

        FRAME_END
        ret;
SYM_FUNC_END(__sm4_crypt_blk8)

/*
 * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx_crypt8)
        /* input:
         *   %rdi: round key array, CTX
         *   %rsi: dst (1..8 blocks)
         *   %rdx: src (1..8 blocks)
         *   %rcx: num blocks (1..8)
         */
        cmpq $5, %rcx;
        jb sm4_aesni_avx_crypt4;

        FRAME_BEGIN

        vmovdqu (0 * 16)(%rdx), RA0;
        vmovdqu (1 * 16)(%rdx), RA1;
        vmovdqu (2 * 16)(%rdx), RA2;
        vmovdqu (3 * 16)(%rdx), RA3;
        vmovdqu (4 * 16)(%rdx), RB0;
        vmovdqa RB0, RB1;
        vmovdqa RB0, RB2;
        vmovdqa RB0, RB3;
        je .Lblk8_load_input_done;
        vmovdqu (5 * 16)(%rdx), RB1;
        cmpq $7, %rcx;
        jb .Lblk8_load_input_done;
        vmovdqu (6 * 16)(%rdx), RB2;
        je .Lblk8_load_input_done;
        vmovdqu (7 * 16)(%rdx), RB3;

.Lblk8_load_input_done:
        call __sm4_crypt_blk8;

        cmpq $6, %rcx;
        vmovdqu RA0, (0 * 16)(%rsi);
        vmovdqu RA1, (1 * 16)(%rsi);
        vmovdqu RA2, (2 * 16)(%rsi);
        vmovdqu RA3, (3 * 16)(%rsi);
        vmovdqu RB0, (4 * 16)(%rsi);
        jb .Lblk8_store_output_done;
        vmovdqu RB1, (5 * 16)(%rsi);
        je .Lblk8_store_output_done;
        vmovdqu RB2, (6 * 16)(%rsi);
        cmpq $7, %rcx;
        je .Lblk8_store_output_done;
        vmovdqu RB3, (7 * 16)(%rsi);

.Lblk8_store_output_done:
        vzeroall;
        FRAME_END
        ret;
SYM_FUNC_END(sm4_aesni_avx_crypt8)

/*
 * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
        /* input:
         *   %rdi: round key array, CTX
         *   %rsi: dst (8 blocks)
         *   %rdx: src (8 blocks)
         *   %rcx: iv (big endian, 128bit)
         */
        FRAME_BEGIN

        /* load IV and byteswap */
        vmovdqu (%rcx), RA0;

        vmovdqa .Lbswap128_mask rRIP, RBSWAP;
        vpshufb RBSWAP, RA0, RTMP0; /* be => le */

        vpcmpeqd RNOT, RNOT, RNOT;
        vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */

#define inc_le128(x, minus_one, tmp) \
        vpcmpeqq minus_one, x, tmp; \
        vpsubq minus_one, x, x;     \
        vpslldq $8, tmp, tmp;       \
        vpsubq tmp, x, x;
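
/*
 * inc_le128() adds 1 to a 128-bit little-endian counter in an XMM register:
 * vpsubq of minus_one (-1 in the low qword, 0 in the high qword) increments
 * the low half, while the preceding vpcmpeqq plus the vpslldq detect a low
 * qword of all-ones and propagate the carry into the high qword.
 */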

        /* construct IVs */
        inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
        vpshufb RBSWAP, RTMP0, RA1;
        inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
        vpshufb RBSWAP, RTMP0, RA2;
        inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
        vpshufb RBSWAP, RTMP0, RA3;
        inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
        vpshufb RBSWAP, RTMP0, RB0;
        inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
        vpshufb RBSWAP, RTMP0, RB1;
        inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
        vpshufb RBSWAP, RTMP0, RB2;
        inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
        vpshufb RBSWAP, RTMP0, RB3;
        inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
        vpshufb RBSWAP, RTMP0, RTMP1;

        /* store new IV */
        vmovdqu RTMP1, (%rcx);

        call __sm4_crypt_blk8;

        vpxor (0 * 16)(%rdx), RA0, RA0;
        vpxor (1 * 16)(%rdx), RA1, RA1;
        vpxor (2 * 16)(%rdx), RA2, RA2;
        vpxor (3 * 16)(%rdx), RA3, RA3;
        vpxor (4 * 16)(%rdx), RB0, RB0;
        vpxor (5 * 16)(%rdx), RB1, RB1;
        vpxor (6 * 16)(%rdx), RB2, RB2;
        vpxor (7 * 16)(%rdx), RB3, RB3;

        vmovdqu RA0, (0 * 16)(%rsi);
        vmovdqu RA1, (1 * 16)(%rsi);
        vmovdqu RA2, (2 * 16)(%rsi);
        vmovdqu RA3, (3 * 16)(%rsi);
        vmovdqu RB0, (4 * 16)(%rsi);
        vmovdqu RB1, (5 * 16)(%rsi);
        vmovdqu RB2, (6 * 16)(%rsi);
        vmovdqu RB3, (7 * 16)(%rsi);

        vzeroall;
        FRAME_END
        ret;
SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)

/*
 * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
        /* input:
         *   %rdi: round key array, CTX
         *   %rsi: dst (8 blocks)
         *   %rdx: src (8 blocks)
         *   %rcx: iv
         */
        FRAME_BEGIN

        vmovdqu (0 * 16)(%rdx), RA0;
        vmovdqu (1 * 16)(%rdx), RA1;
        vmovdqu (2 * 16)(%rdx), RA2;
        vmovdqu (3 * 16)(%rdx), RA3;
        vmovdqu (4 * 16)(%rdx), RB0;
        vmovdqu (5 * 16)(%rdx), RB1;
        vmovdqu (6 * 16)(%rdx), RB2;
        vmovdqu (7 * 16)(%rdx), RB3;

        call __sm4_crypt_blk8;

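        /*
         * CBC decrypt: P[i] = D(C[i]) ^ C[i-1], with C[-1] taken from *iv;
         * the last ciphertext block (kept in RNOT) becomes the new IV.
         */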
        vmovdqu (7 * 16)(%rdx), RNOT;
        vpxor (%rcx), RA0, RA0;
        vpxor (0 * 16)(%rdx), RA1, RA1;
        vpxor (1 * 16)(%rdx), RA2, RA2;
        vpxor (2 * 16)(%rdx), RA3, RA3;
        vpxor (3 * 16)(%rdx), RB0, RB0;
        vpxor (4 * 16)(%rdx), RB1, RB1;
        vpxor (5 * 16)(%rdx), RB2, RB2;
        vpxor (6 * 16)(%rdx), RB3, RB3;
        vmovdqu RNOT, (%rcx); /* store new IV */

        vmovdqu RA0, (0 * 16)(%rsi);
        vmovdqu RA1, (1 * 16)(%rsi);
        vmovdqu RA2, (2 * 16)(%rsi);
        vmovdqu RA3, (3 * 16)(%rsi);
        vmovdqu RB0, (4 * 16)(%rsi);
        vmovdqu RB1, (5 * 16)(%rsi);
        vmovdqu RB2, (6 * 16)(%rsi);
        vmovdqu RB3, (7 * 16)(%rsi);

        vzeroall;
        FRAME_END
        ret;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)

/*
 * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
.align 8
SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
        /* input:
         *   %rdi: round key array, CTX
         *   %rsi: dst (8 blocks)
         *   %rdx: src (8 blocks)
         *   %rcx: iv
         */
        FRAME_BEGIN

        /* Load input */
        vmovdqu (%rcx), RA0;
        vmovdqu 0 * 16(%rdx), RA1;
        vmovdqu 1 * 16(%rdx), RA2;
        vmovdqu 2 * 16(%rdx), RA3;
        vmovdqu 3 * 16(%rdx), RB0;
        vmovdqu 4 * 16(%rdx), RB1;
        vmovdqu 5 * 16(%rdx), RB2;
        vmovdqu 6 * 16(%rdx), RB3;

        /* Update IV */
        vmovdqu 7 * 16(%rdx), RNOT;
        vmovdqu RNOT, (%rcx);

        call __sm4_crypt_blk8;

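        /*
         * CFB decrypt: P[i] = E(C[i-1]) ^ C[i], where the blocks fed to
         * __sm4_crypt_blk8 above were the IV followed by ciphertext blocks
         * 0..6; the new IV (ciphertext block 7) was already stored above.
         */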
        vpxor (0 * 16)(%rdx), RA0, RA0;
        vpxor (1 * 16)(%rdx), RA1, RA1;
        vpxor (2 * 16)(%rdx), RA2, RA2;
        vpxor (3 * 16)(%rdx), RA3, RA3;
        vpxor (4 * 16)(%rdx), RB0, RB0;
        vpxor (5 * 16)(%rdx), RB1, RB1;
        vpxor (6 * 16)(%rdx), RB2, RB2;
        vpxor (7 * 16)(%rdx), RB3, RB3;

        vmovdqu RA0, (0 * 16)(%rsi);
        vmovdqu RA1, (1 * 16)(%rsi);
        vmovdqu RA2, (2 * 16)(%rsi);
        vmovdqu RA3, (3 * 16)(%rsi);
        vmovdqu RB0, (4 * 16)(%rsi);
        vmovdqu RB1, (5 * 16)(%rsi);
        vmovdqu RB2, (6 * 16)(%rsi);
        vmovdqu RB3, (7 * 16)(%rsi);

        vzeroall;
        FRAME_END
        ret;
SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)