/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <asm/crypto/glue_helper.h>
#define AESNI_ALIGN		16
#define AESNI_ALIGN_ATTR	__attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK		(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA	((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE	(sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE	(sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];	/* 4 byte salt carried at the end of the RFC4106 key */
};
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
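/*
 * AVX_GEN2_OPTSIZE and AVX_GEN4_OPTSIZE are the request-size thresholds
 * used by the GCM wrappers below: requests shorter than AVX_GEN2_OPTSIZE
 * (or using a key other than 128 bits) take the plain SSE
 * aesni_gcm_enc()/aesni_gcm_dec() path, and only larger requests use the
 * AVX or AVX2 routines, where the wider code pays off.
 */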
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
			      aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
			      aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
				       aad_len, auth_tag, auth_tag_len);
	}
}
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
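/*
 * aesni_gcm_enc_tfm/aesni_gcm_dec_tfm are indirection points for the GCM
 * bulk routines; aesni_init() points them at the SSE, AVX or AVX2 variants
 * depending on the CPU features detected at load time.
 */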
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
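/*
 * The crypto API only guarantees CRYPTO_MINALIGN for the tfm context, so
 * CRYPTO_AES_CTX_SIZE reserves AESNI_ALIGN_EXTRA bytes of slack and the
 * helpers above realign the pointer to the 16-byte boundary that the
 * AES-NI assembly expects.
 */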
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
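/*
 * ctr_crypt_final() handles the trailing partial block of a CTR request:
 * it encrypts the current counter block into a keystream buffer, XORs the
 * remaining bytes into the destination, and bumps the counter.
 */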
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * the supported sizes, so the else branch covers 256-bit keys.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
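/*
 * The common_glue_ctx tables below let the glue_helper XTS code pick the
 * eight-block aesni_xts_crypt8() fast path when enough data is available
 * and fall back to the single-block routines otherwise.
 */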
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}
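/*
 * As in the GCM specification, the hash subkey is the block cipher applied
 * to the all-zero block: the container is zeroed above and then encrypted
 * in place with the user's AES key.
 */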
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 16 or 20 bytes */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	    req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) &&
	    (!PageHighMem(sg_page(req->dst)) ||
	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
				GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 16 or 20 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	    req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) &&
	    (!PageHighMem(sg_page(req->dst)) ||
	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		 -EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
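/*
 * The rfc4106_encrypt()/rfc4106_decrypt() wrappers below route requests
 * through cryptd. When the FPU is usable in the current context and the
 * cryptd queue is empty, they call the internal helper synchronously via
 * cryptd_aead_child(); otherwise the request is handled asynchronously.
 */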
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_encrypt(req);
}
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_decrypt(req);
}
static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes",
	.cra_driver_name	= "__aes-aesni",
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
} };
static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	}
};
static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

static struct {
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
	{
		.algname	= "pcbc(aes)",
		.drvname	= "pcbc-aes-aesni",
		.basename	= "fpu(pcbc(__aes-aesni))",
	},
#endif
};
#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static void aesni_free_simds(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
		    aesni_simd_skciphers[i]; i++)
		simd_skcipher_free(aesni_simd_skciphers[i]);

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
		if (aesni_simd_skciphers2[i].simd)
			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}
static int __init aesni_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_skciphers(aesni_skciphers,
					ARRAY_SIZE(aesni_skciphers));
	if (err)
		goto unregister_algs;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_skciphers;

	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
		algname = aesni_skciphers[i].base.cra_name + 2;
		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
		basename = aesni_skciphers[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aesni_simd_skciphers[i] = simd;
	}

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
		algname = aesni_simd_skciphers2[i].algname;
		drvname = aesni_simd_skciphers2[i].drvname;
		basename = aesni_simd_skciphers2[i].basename;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			continue;

		aesni_simd_skciphers2[i].simd = simd;
	}

	return 0;

unregister_simds:
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
static void __exit aesni_exit(void)
{
	aesni_free_simds();

	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");