// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/crypto/aes.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location.
 * This needs to be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

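/*
 * Dispatch table over the GCM assembly implementations. One instance exists
 * per ISA level (SSE, plus AVX and AVX2 when assembler support is built in);
 * aesni_gcm_tfm is pointed at the fastest variant for the running CPU during
 * aesni_init().
 */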
static const struct aesni_gcm_tfm_s {
	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long plaintext_len);
	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long ciphertext_len);
	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
			 u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
	.init = &aesni_gcm_init,
	.enc_update = &aesni_gcm_enc_update,
	.dec_update = &aesni_gcm_dec_update,
	.finalize = &aesni_gcm_finalize,
};

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.init = &aesni_gcm_init_avx_gen2,
	.enc_update = &aesni_gcm_enc_update_avx_gen2,
	.dec_update = &aesni_gcm_dec_update_avx_gen2,
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.init = &aesni_gcm_init_avx_gen4,
	.enc_update = &aesni_gcm_enc_update_avx_gen4,
	.dec_update = &aesni_gcm_dec_update_avx_gen4,
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

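/*
 * The asm code requires 16-byte aligned context data. When the alignment
 * guaranteed by the crypto API (crypto_tfm_ctx_alignment()) already covers
 * AESNI_ALIGN, these helpers degrade to a no-op; otherwise they round the
 * context pointer up into the slack reserved via cra_alignmask or
 * AESNI_ALIGN_EXTRA.
 */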
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

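/*
 * The entry points below follow one pattern: if the FPU cannot be used in
 * the current context (e.g. the caller runs in a hard interrupt), fall back
 * to the generic C implementation; otherwise run the AES-NI code between
 * kernel_fpu_begin() and kernel_fpu_end().
 */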
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!crypto_simd_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

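/*
 * The __aes variants skip the crypto_simd_usable() check and the FPU
 * save/restore; they back the CRYPTO_ALG_INTERNAL "__aes" cipher, whose
 * callers must guarantee that SIMD is already usable.
 */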
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

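/*
 * The skcipher walks below map the request's scatterlists in chunks. Each
 * pass handles the whole-block portion (nbytes & AES_BLOCK_MASK) and hands
 * the remainder back to skcipher_walk_done(); the 'true' passed to
 * skcipher_walk_virt() requests an atomic walk, since sleeping between
 * kernel_fpu_begin() and kernel_fpu_end() is not allowed.
 */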
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
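/*
 * CTR mode needs no padding: for a trailing partial block, the current
 * counter block is encrypted and only walk->nbytes bytes of the resulting
 * keystream are XORed into the output.
 */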
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

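/*
 * An XTS key is two AES keys of equal size concatenated: the first half
 * encrypts the data, the second half encrypts the tweak. xts_verify_key()
 * rejects bad lengths (and, in FIPS mode, identical halves).
 */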
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

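/*
 * glue_helper dispatch tables: entries are tried in order, so the 8-block
 * aesni_xts_crypt8() path handles the bulk of a request and the 1-block
 * path mops up the tail. With fpu_blocks_limit = 1 the glue code claims
 * the FPU for any non-empty request.
 */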
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx));
}

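/*
 * Derive the GHASH subkey H = E_K(0^128) as defined by the GCM spec
 * (NIST SP 800-38D): allocate a one-off AES cipher, key it, and encrypt a
 * single all-zero block.
 */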
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/*
	 * Clear the data in the hash sub key container to zero.
	 * We want to cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4-byte nonce (salt) at the end of the key. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long.
 */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

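	/*
	 * Short inputs cannot amortize the setup cost of the wider AVX2/AVX
	 * code paths, so fall back to a narrower implementation below the
	 * AVX_GEN4_OPTSIZE / AVX_GEN2_OPTSIZE byte thresholds.
	 */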
#ifdef CONFIG_AS_AVX2
	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
#ifdef CONFIG_AS_AVX
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;
#endif

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length &&
	    (!PageHighMem(sg_page(req->src)) ||
	     req->src->offset + req->src->length <= PAGE_SIZE)) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, GFP_ATOMIC);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		if (req->src != req->dst) {
			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
						  req->assoclen);
			scatterwalk_start(&dst_sg_walk, dst_sg);
		}
	}

	kernel_fpu_begin();
	gcm_tfm->init(aes_ctx, &data, iv,
		      hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							    dst, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							    dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							    src, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							    src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

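	/*
	 * For decryption, compare the tag computed over the ciphertext with
	 * the tag stored at the end of the source data. crypto_memneq() is
	 * used so the comparison runs in constant time.
	 */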
	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
		       -EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length needs to be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4-byte nonce (salt) || 8-byte IV || 32-bit counter */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length needs to be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4-byte nonce (salt) || 8-byte IV || 32-bit counter */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes",
	.cra_driver_name	= "__aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
} };

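/*
 * The "__"-prefixed CRYPTO_ALG_INTERNAL algorithms below are hidden from
 * normal algorithm lookup. aesni_init() wraps each of them via
 * simd_register_skciphers_compat()/simd_register_aeads_compat(), producing
 * public wrappers that call the internal algorithm directly when SIMD is
 * usable and defer to cryptd otherwise.
 */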
static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_algs;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

	return 0;

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	return err;
}

static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");