/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

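/*
 * Note on the *_CTX_SIZE macros above: the crypto API only guarantees
 * CRYPTO_MINALIGN for a tfm context, while the AES-NI code needs its key
 * schedules aligned to AESNI_ALIGN (16 bytes). AESNI_ALIGN_EXTRA reserves
 * the worst-case padding so that aes_ctx() below can round the context
 * pointer up by hand.
 */
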
/*
 * This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

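/*
 * The dispatch helpers below use these thresholds: requests shorter than
 * AVX_GEN2_OPTSIZE bytes (or with non-128-bit keys) take the plain
 * AES-NI/SSE GCM path, mid-sized requests take the AVX "gen2" path, and
 * requests of AVX_GEN4_OPTSIZE bytes or more take the AVX2 "gen4" path.
 */
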
#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

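/*
 * These function pointers are bound once, in aesni_init(), to the fastest
 * GCM implementation the CPU supports (SSE, AVX or AVX2); every RFC4106
 * request then goes through them.
 */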
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

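/*
 * If the crypto API already aligns contexts at least as strictly as
 * AESNI_ALIGN, the raw pointer is used as-is (align drops to 1); otherwise
 * it is rounded up into the AESNI_ALIGN_EXTRA slack reserved by the
 * *_CTX_SIZE macros above.
 */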
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

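/*
 * Whenever the FPU cannot be used in the current context (e.g. in a hard
 * interrupt that preempted other in-kernel FPU code), the helpers below
 * fall back to the generic implementations instead of the AES-NI
 * instructions, which need the SIMD register state.
 */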
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

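/*
 * The "__" variants below skip the irq_fpu_usable() check: they back the
 * internal "__aes" cipher, whose callers are responsible for the
 * surrounding kernel_fpu_begin()/kernel_fpu_end().
 */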
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

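/*
 * The ECB/CBC handlers below all follow the same skcipher_walk pattern:
 * each step of the walk yields a virtually mapped span of data, the asm
 * routine processes the whole blocks in it, and the remainder (always less
 * than a block for these modes) is handed back to the walk via
 * skcipher_walk_done().
 */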
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
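/*
 * Handle the final, partial block of a CTR request: encrypt the current
 * counter block to produce one keystream block, XOR the leftover bytes
 * against it, and bump the counter.
 */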
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

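/*
 * CTR mode turns AES into a stream cipher, so ctr_crypt() serves for both
 * encryption and decryption; full blocks go through the selected asm
 * routine and any trailing partial block through ctr_crypt_final().
 */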
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

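/*
 * XTS takes a double-length key: after xts_verify_key() has validated it,
 * the first half becomes the data-encryption key and the second half the
 * tweak-encryption key, each expanded into its own AES context.
 */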
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

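/*
 * Dispatch tables for the XTS glue code: the eight-block asm routine is
 * used while at least eight blocks remain, then the one-block routine
 * finishes up. fpu_blocks_limit = 1 means the glue helper enters its FPU
 * section for any request size.
 */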
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

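/*
 * The user-visible "rfc4106(gcm(aes))" algorithm is an async shell: its
 * context is just a pointer to a cryptd AEAD wrapping the internal
 * "__driver-gcm-aes-aesni" helper, which does the real work.
 */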
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

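/*
 * Derive the GHASH subkey by encrypting an all-zero block with the session
 * key (H = E_K(0^128), as specified for GCM). A standalone "aes" cipher
 * handle is allocated just for this one-off computation.
 */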
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/*
	 * Clear the data in the hash sub key container to zero.
	 * We want to cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length.
 * It can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

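/*
 * Synchronous GCM helpers. These run with the FPU held and implement both
 * a fast path (source and destination each a single scatterlist entry that
 * fits in one page, mapped and processed in place) and a slow path (data
 * bounced through a temporary buffer with scatterwalk_map_and_copy()).
 */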
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, we need to have the AAD length equal to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV (the pre-counter block J0): */
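	/*
	 * Bytes 0-3 are the salt carried in the key, bytes 4-11 the explicit
	 * IV from the request, and bytes 12-15 the initial big-endian counter
	 * value of 1, matching the layout aesni_gcm_enc() documents above.
	 */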
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/*
	 * The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet.
	 */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, we need to have the AAD length equal to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	/* Build the IV (the pre-counter block J0), as in the encrypt path: */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
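	/*
	 * crypto_memneq() runs in constant time, so the comparison does not
	 * leak how many tag bytes matched through timing.
	 */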
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		 -EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}

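/*
 * Async entry points for the exported rfc4106 algorithm. If the FPU is
 * usable and we are either not in atomic context or nothing is queued on
 * the cryptd workqueue (so ordering cannot be violated), the request is
 * handed directly to the synchronous child; otherwise it goes through
 * cryptd.
 */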
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;

	tfm = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		tfm = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, tfm);

	return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes",
	.cra_driver_name	= "__aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
} };

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

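/*
 * Algorithms whose cra_name starts with "__" are marked
 * CRYPTO_ALG_INTERNAL and are never handed to users directly. aesni_init()
 * wraps each of them in a simd skcipher, which calls the inner algorithm
 * when SIMD is usable and defers to cryptd otherwise, and registers that
 * wrapper under the name without the prefix.
 */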
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

struct {
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
	{
		.algname	= "pcbc(aes)",
		.drvname	= "pcbc-aes-aesni",
		.basename	= "fpu(pcbc(__aes-aesni))",
	},
#endif
};

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
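/*
 * The device table lets udev/modprobe autoload this module on any CPU
 * advertising the AES-NI feature flag.
 */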

static void aesni_free_simds(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
		    aesni_simd_skciphers[i]; i++)
		simd_skcipher_free(aesni_simd_skciphers[i]);

	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
		    aesni_simd_skciphers2[i].simd; i++)
		simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

static int __init aesni_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_skciphers(aesni_skciphers,
					ARRAY_SIZE(aesni_skciphers));
	if (err)
		goto unregister_algs;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_skciphers;

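	/*
	 * Create the user-visible simd wrappers for the internal algorithms.
	 * The "+ 2" skips the "__" prefix, so e.g. "__ecb(aes)" /
	 * "__ecb-aes-aesni" is exposed as "ecb(aes)" / "ecb-aes-aesni".
	 */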
1152 for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
1153 algname = aesni_skciphers[i].base.cra_name + 2;
1154 drvname = aesni_skciphers[i].base.cra_driver_name + 2;
1155 basename = aesni_skciphers[i].base.cra_driver_name;
1156 simd = simd_skcipher_create_compat(algname, drvname, basename);
1157 err = PTR_ERR(simd);
1158 if (IS_ERR(simd))
1159 goto unregister_simds;
1160
1161 aesni_simd_skciphers[i] = simd;
1162 }
af05b300 1163
85671860
HX
1164 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
1165 algname = aesni_simd_skciphers2[i].algname;
1166 drvname = aesni_simd_skciphers2[i].drvname;
1167 basename = aesni_simd_skciphers2[i].basename;
1168 simd = simd_skcipher_create_compat(algname, drvname, basename);
1169 err = PTR_ERR(simd);
1170 if (IS_ERR(simd))
1171 goto unregister_simds;
af05b300 1172
85671860
HX
1173 aesni_simd_skciphers2[i].simd = simd;
1174 }
1175
1176 return 0;
1177
1178unregister_simds:
1179 aesni_free_simds();
1180 crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1181unregister_skciphers:
1182 crypto_unregister_skciphers(aesni_skciphers,
1183 ARRAY_SIZE(aesni_skciphers));
af05b300
HX
1184unregister_algs:
1185 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1186fpu_exit:
1187 crypto_fpu_exit();
1188 return err;
54b6a1bd
HY
1189}

static void __exit aesni_exit(void)
{
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");