/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
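
/*
 * Illustrative layout of the pre-counter block j0 described above (the
 * sample salt/IV byte values are made up):
 *
 *	bytes  0-3   salt from the SA,          e.g. 0a 0b 0c 0d
 *	bytes  4-11  IV from the ESP payload,   e.g. 10 11 12 13 14 15 16 17
 *	bytes 12-15  initial block counter,     always 00 00 00 01
 *
 * __driver_rfc4106_encrypt()/__driver_rfc4106_decrypt() below assemble
 * exactly this 16-byte block from ctx->nonce and req->iv.
 */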
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	if (plaintext_len < AVX_GEN2_OPTSIZE) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif
#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	if (plaintext_len < AVX_GEN2_OPTSIZE) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif
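
/*
 * Size-based dispatch used by the wrappers above: requests shorter than
 * AVX_GEN2_OPTSIZE (640 bytes) stay on the SSE aesni_gcm_enc/dec path,
 * requests between 640 and AVX_GEN4_OPTSIZE (4096) bytes use the AVX
 * gen2 routines, and anything larger uses the AVX2 gen4 routines; the
 * subkey precomputation only pays off for sufficiently long requests.
 */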
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
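
/*
 * Example of what aes_ctx() does: the raw_ctx buffers only carry the
 * generic crypto_tfm_ctx_alignment() guarantee, so they include
 * AESNI_ALIGN - 1 bytes of slack and the pointer is rounded up here.
 * A raw_ctx at address 0x1008 yields a 16-byte-aligned crypto_aes_ctx
 * at 0x1010; if the API alignment already covers AESNI_ALIGN, align is
 * forced to 1 and ALIGN(addr, 1) leaves the pointer untouched.
 */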
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
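
/*
 * Worked example for ctr_crypt_final() above: a 20-byte request leaves
 * nbytes == 4 after the full-block loop in ctr_crypt(); the counter
 * block is encrypted once and only the first 4 keystream bytes are
 * XORed into the output, so the partial trailing block never writes
 * past the requested length.
 */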
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}
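
/*
 * Example of the xts key split above: a 64-byte xts(aes) key is two
 * concatenated AES-256 keys, bytes 0-31 keying the data (crypt) cipher
 * and bytes 32-63 keying the tweak cipher; a 32-byte key splits into
 * two AES-128 keys the same way.
 */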
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
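
/*
 * What rfc4106_set_hash_subkey() computes: the GHASH subkey is
 * H = E_K(0^128).  Driving a single-block "ctr(aes)" request with an
 * all-zero counter block over an all-zero buffer yields exactly
 * E_K(0^128), since the keystream block is XORed with zero plaintext.
 */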
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
				 aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}
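
/*
 * Example rfc4106 key layout accepted by rfc4106_set_key() above: a
 * 20-byte blob carries the AES-128 key in bytes 0-15 and the 4-byte
 * nonce (salt) in bytes 16-19; the salt later becomes the first four
 * bytes of the j0 pre-counter block.
 */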
/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 8 or 12 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
#endif
static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
}, {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "__gcm-aes-aesni",
	.cra_driver_name	= "__driver-gcm-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) +
				  AESNI_ALIGN,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.aead = {
			.encrypt	= __driver_rfc4106_encrypt,
			.decrypt	= __driver_rfc4106_decrypt,
		},
	},
}, {
	.cra_name		= "rfc4106(gcm(aes))",
	.cra_driver_name	= "rfc4106-gcm-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) +
				  AESNI_ALIGN,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_nivaead_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= rfc4106_init,
	.cra_exit		= rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey		= rfc4106_set_key,
			.setauthsize	= rfc4106_set_authsize,
			.encrypt	= rfc4106_encrypt,
			.decrypt	= rfc4106_decrypt,
			.geniv		= "seqiv",
			.ivsize		= 8,
			.maxauthsize	= 16,
		},
	},
#endif
#ifdef HAS_PCBC
}, {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name		= "__lrw-aes-aesni",
	.cra_driver_name	= "__driver-lrw-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= lrw_aesni_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-aes-aesni",
	.cra_driver_name	= "__driver-xts-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aesni_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (cpu_has_avx) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");