/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN 16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
};

struct generic_gcmaes_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);


#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
        if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
        if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
        if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
        if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

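/*
 * The context buffers handed out by the crypto API are only guaranteed to
 * be aligned to crypto_tfm_ctx_alignment(), so round the raw pointer up
 * to AESNI_ALIGN before using it as a struct crypto_aes_ctx.
 */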
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

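/*
 * Expand the user-supplied key: use the AES-NI key schedule instruction
 * when the FPU is usable in the current context, otherwise fall back to
 * the generic C key expansion.
 */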
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        return aes_set_key_common(crypto_skcipher_tfm(tfm),
                                  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
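/*
 * Handle the final partial block of a CTR request: encrypt the counter
 * block to get one keystream block, XOR only the remaining bytes of input
 * into the output and advance the counter.
 */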
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * based on key length, override with the by8 version
         * of ctr mode encryption/decryption for improved performance
         * aes_set_key_common() ensures that key length is one of
         * {128,192,256}
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                  nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();

        return err;
}

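/*
 * An XTS key is two AES keys of equal size concatenated: the first half
 * keys the data (crypt) cipher and the second half keys the tweak cipher.
 */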
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
                                 key, keylen);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
                                  key + keylen, keylen);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

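/*
 * Glue tables for XTS: process eight blocks per call through
 * aesni_xts_crypt8() where possible and fall back to the single-block
 * routines for the remainder.
 */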
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};

static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_enc_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_dec_xts, req,
                                   XTS_TWEAK_CAST(aesni_xts_tweak),
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx));
}

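/*
 * The outer rfc4106(gcm(aes)) transform wraps the internal
 * "__driver-gcm-aes-aesni" AEAD in cryptd, so requests issued from
 * contexts where the FPU cannot be used are deferred to cryptd instead
 * of failing.
 */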
static int rfc4106_init(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}

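/*
 * Derive the GHASH subkey by encrypting an all-zero block with the
 * session key, using a temporary generic AES cipher handle.
 */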
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_cipher;

        /* Clear the data in the hash sub key container to zero.*/
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
        crypto_free_cipher(tfm);
        return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for 4 byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

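/*
 * If both src and dst are single scatterlist entries that can be mapped
 * directly (no highmem page crossing), operate on them in place;
 * otherwise bounce the request through a kmalloc'd buffer and copy the
 * result back afterwards.
 */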
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};

        if (sg_is_last(req->src) &&
            (!PageHighMem(sg_page(req->src)) ||
            req->src->offset + req->src->length <= PAGE_SIZE) &&
            sg_is_last(req->dst) &&
            (!PageHighMem(sg_page(req->dst)) ||
            req->dst->offset + req->dst->length <= PAGE_SIZE)) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          hash_subkey, assoc, assoclen,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        int retval = 0;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

        if (sg_is_last(req->src) &&
            (!PageHighMem(sg_page(req->src)) ||
            req->src->offset + req->src->length <= PAGE_SIZE) &&
            sg_is_last(req->dst) &&
            (!PageHighMem(sg_page(req->dst)) ||
            req->dst->offset + req->dst->length <= PAGE_SIZE)) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }


        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          hash_subkey, assoc, assoclen,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;

}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        unsigned int i;
        __be32 counter = cpu_to_be32(1);

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need the AAD length to be
         * equal to 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        unsigned int i;

        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need the AAD length to be
         * equal to 16 or 20 bytes.
         */

        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name = "aes",
        .cra_driver_name = "aes-aesni",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt
                }
        }
}, {
        .cra_name = "__aes",
        .cra_driver_name = "__aes-aesni",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = __aes_encrypt,
                        .cia_decrypt = __aes_decrypt
                }
        }
} };

static struct skcipher_alg aesni_skciphers[] = {
        {
                .base = {
                        .cra_name = "__ecb(aes)",
                        .cra_driver_name = "__ecb-aes-aesni",
                        .cra_priority = 400,
                        .cra_flags = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = aesni_skcipher_setkey,
                .encrypt = ecb_encrypt,
                .decrypt = ecb_decrypt,
        }, {
                .base = {
                        .cra_name = "__cbc(aes)",
                        .cra_driver_name = "__cbc-aes-aesni",
                        .cra_priority = 400,
                        .cra_flags = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .setkey = aesni_skcipher_setkey,
                .encrypt = cbc_encrypt,
                .decrypt = cbc_decrypt,
#ifdef CONFIG_X86_64
        }, {
                .base = {
                        .cra_name = "__ctr(aes)",
                        .cra_driver_name = "__ctr-aes-aesni",
                        .cra_priority = 400,
                        .cra_flags = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize = 1,
                        .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .chunksize = AES_BLOCK_SIZE,
                .setkey = aesni_skcipher_setkey,
                .encrypt = ctr_crypt,
                .decrypt = ctr_crypt,
        }, {
                .base = {
                        .cra_name = "__xts(aes)",
                        .cra_driver_name = "__xts-aes-aesni",
                        .cra_priority = 401,
                        .cra_flags = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = XTS_AES_CTX_SIZE,
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = 2 * AES_MIN_KEY_SIZE,
                .max_keysize = 2 * AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .setkey = xts_aesni_setkey,
                .encrypt = xts_encrypt,
                .decrypt = xts_decrypt,
#endif
        }
};

struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

struct {
        const char *algname;
        const char *drvname;
        const char *basename;
        struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
        {
                .algname = "pcbc(aes)",
                .drvname = "pcbc-aes-aesni",
                .basename = "fpu(pcbc(__aes-aesni))",
        },
#endif
};

#ifdef CONFIG_X86_64
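/*
 * gcm(aes) differs from rfc4106(gcm(aes)) in that the full 12-byte IV
 * comes from the request rather than from a nonce stored with the key,
 * so no bytes are reserved out of the key material.
 */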
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        __be32 counter = cpu_to_be32(1);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static struct aead_alg aesni_aead_algs[] = { {
        .setkey = common_rfc4106_set_key,
        .setauthsize = common_rfc4106_set_authsize,
        .encrypt = helper_rfc4106_encrypt,
        .decrypt = helper_rfc4106_decrypt,
        .ivsize = 8,
        .maxauthsize = 16,
        .base = {
                .cra_name = "__gcm-aes-aesni",
                .cra_driver_name = "__driver-gcm-aes-aesni",
                .cra_flags = CRYPTO_ALG_INTERNAL,
                .cra_blocksize = 1,
                .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask = AESNI_ALIGN - 1,
                .cra_module = THIS_MODULE,
        },
}, {
        .init = rfc4106_init,
        .exit = rfc4106_exit,
        .setkey = rfc4106_set_key,
        .setauthsize = rfc4106_set_authsize,
        .encrypt = rfc4106_encrypt,
        .decrypt = rfc4106_decrypt,
        .ivsize = 8,
        .maxauthsize = 16,
        .base = {
                .cra_name = "rfc4106(gcm(aes))",
                .cra_driver_name = "rfc4106-gcm-aesni",
                .cra_priority = 400,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = 1,
                .cra_ctxsize = sizeof(struct cryptd_aead *),
                .cra_module = THIS_MODULE,
        },
}, {
        .setkey = generic_gcmaes_set_key,
        .setauthsize = generic_gcmaes_set_authsize,
        .encrypt = generic_gcmaes_encrypt,
        .decrypt = generic_gcmaes_decrypt,
        .ivsize = 12,
        .maxauthsize = 16,
        .base = {
                .cra_name = "gcm(aes)",
                .cra_driver_name = "generic-gcm-aesni",
                .cra_priority = 400,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = 1,
                .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
                .cra_alignmask = AESNI_ALIGN - 1,
                .cra_module = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif


static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static void aesni_free_simds(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
                if (aesni_simd_skciphers2[i].simd)
                        simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

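/*
 * Module init: pick the best available GCM and CTR implementations for
 * this CPU (AVX2, AVX or plain SSE/AES-NI), then register the cipher,
 * skcipher and AEAD algorithms along with their simd wrappers.
 */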
static int __init aesni_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_skciphers(aesni_skciphers,
                                        ARRAY_SIZE(aesni_skciphers));
        if (err)
                goto unregister_algs;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_skciphers;

        for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
                algname = aesni_skciphers[i].base.cra_name + 2;
                drvname = aesni_skciphers[i].base.cra_driver_name + 2;
                basename = aesni_skciphers[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesni_simd_skciphers[i] = simd;
        }

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
                algname = aesni_simd_skciphers2[i].algname;
                drvname = aesni_simd_skciphers2[i].drvname;
                basename = aesni_simd_skciphers2[i].basename;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        continue;

                aesni_simd_skciphers2[i].simd = simd;
        }

        return 0;

unregister_simds:
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}

static void __exit aesni_exit(void)
{
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");