1/*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
25#include <linux/module.h>
26#include <linux/err.h>
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/cryptd.h>
30#include <crypto/ctr.h>
31#include <crypto/b128ops.h>
32#include <crypto/lrw.h>
33#include <crypto/xts.h>
34#include <asm/cpu_device_id.h>
35#include <asm/i387.h>
36#include <asm/crypto/aes.h>
37#include <crypto/ablk_helper.h>
38#include <crypto/scatterwalk.h>
39#include <crypto/internal/aead.h>
40#include <linux/workqueue.h>
41#include <linux/spinlock.h>
42#ifdef CONFIG_X86_64
43#include <asm/crypto/glue_helper.h>
44#endif
45
46#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
47#define HAS_PCBC
48#endif
49
50/* This data is stored at the end of the crypto_tfm struct.
51 * It's a type of per "session" data storage location.
52 * This needs to be 16 byte aligned.
53 */
54struct aesni_rfc4106_gcm_ctx {
55 u8 hash_subkey[16];
56 struct crypto_aes_ctx aes_key_expanded;
57 u8 nonce[4];
58 struct cryptd_aead *cryptd_tfm;
59};
60
61struct aesni_gcm_set_hash_subkey_result {
62 int err;
63 struct completion completion;
64};
65
66struct aesni_hash_subkey_req_data {
67 u8 iv[16];
68 struct aesni_gcm_set_hash_subkey_result result;
69 struct scatterlist sg;
70};
71
72#define AESNI_ALIGN (16)
73#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
74#define RFC4106_HASH_SUBKEY_SIZE 16
75
76struct aesni_lrw_ctx {
77 struct lrw_table_ctx lrw_table;
78 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
79};
80
81struct aesni_xts_ctx {
82 u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
83 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
84};
85
86asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
87 unsigned int key_len);
88asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
89 const u8 *in);
90asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
91 const u8 *in);
92asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
93 const u8 *in, unsigned int len);
94asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
95 const u8 *in, unsigned int len);
96asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
97 const u8 *in, unsigned int len, u8 *iv);
98asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
99 const u8 *in, unsigned int len, u8 *iv);
100
101int crypto_fpu_init(void);
102void crypto_fpu_exit(void);
103
104#ifdef CONFIG_X86_64
105asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
106 const u8 *in, unsigned int len, u8 *iv);
107
108asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
109 const u8 *in, bool enc, u8 *iv);
110
111/* asmlinkage void aesni_gcm_enc()
112 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
113 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
114 * const u8 *in, Plaintext input
115 * unsigned long plaintext_len, Length of data in bytes for encryption.
116 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
117 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
118 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
119 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
120 * const u8 *aad, Additional Authentication Data (AAD)
121 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
122 * is going to be 8 or 12 bytes
123 * u8 *auth_tag, Authenticated Tag output.
124 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
125 * Valid values are 16 (most likely), 12 or 8.
126 */
127asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
128 const u8 *in, unsigned long plaintext_len, u8 *iv,
129 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
130 u8 *auth_tag, unsigned long auth_tag_len);
131
132/* asmlinkage void aesni_gcm_dec()
133 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
134 * u8 *out, Plaintext output. Decrypt in-place is allowed.
135 * const u8 *in, Ciphertext input
136 * unsigned long ciphertext_len, Length of data in bytes for decryption.
137 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
138 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
139 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
140 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
141 * const u8 *aad, Additional Authentication Data (AAD)
142 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
143 * to be 8 or 12 bytes
144 * u8 *auth_tag, Authenticated Tag output.
145 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
146 * Valid values are 16 (most likely), 12 or 8.
147 */
148asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
149 const u8 *in, unsigned long ciphertext_len, u8 *iv,
150 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
151 u8 *auth_tag, unsigned long auth_tag_len);
152
153static inline struct
154aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
155{
156 return
157 (struct aesni_rfc4106_gcm_ctx *)
158 PTR_ALIGN((u8 *)
159 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
160}
161#endif
162
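/*
 * The raw AES context inside each tfm is over-allocated by AESNI_ALIGN - 1
 * bytes (see the cra_ctxsize initialisers below), so this helper can always
 * hand back a 16-byte-aligned crypto_aes_ctx even when the generic crypto
 * API only guarantees crypto_tfm_ctx_alignment().
 */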
163static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
164{
165 unsigned long addr = (unsigned long)raw_ctx;
166 unsigned long align = AESNI_ALIGN;
167
168 if (align <= crypto_tfm_ctx_alignment())
169 align = 1;
170 return (struct crypto_aes_ctx *)ALIGN(addr, align);
171}
172
173static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
174 const u8 *in_key, unsigned int key_len)
175{
176 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
177 u32 *flags = &tfm->crt_flags;
178 int err;
179
180 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
181 key_len != AES_KEYSIZE_256) {
182 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
183 return -EINVAL;
184 }
185
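 /*
  * AES-NI uses the SSE register file, which may only be touched between
  * kernel_fpu_begin() and kernel_fpu_end().  When the FPU is not usable
  * in the current context, fall back to the generic C implementation.
  */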
186 if (!irq_fpu_usable())
187 err = crypto_aes_expand_key(ctx, in_key, key_len);
188 else {
189 kernel_fpu_begin();
190 err = aesni_set_key(ctx, in_key, key_len);
191 kernel_fpu_end();
192 }
193
194 return err;
195}
196
197static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
198 unsigned int key_len)
199{
200 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
201}
202
203static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
204{
205 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
206
207 if (!irq_fpu_usable())
208 crypto_aes_encrypt_x86(ctx, dst, src);
209 else {
210 kernel_fpu_begin();
211 aesni_enc(ctx, dst, src);
212 kernel_fpu_end();
213 }
214}
215
216static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
217{
218 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
219
220 if (!irq_fpu_usable())
221 crypto_aes_decrypt_x86(ctx, dst, src);
222 else {
223 kernel_fpu_begin();
224 aesni_dec(ctx, dst, src);
225 kernel_fpu_end();
226 }
227}
228
229static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
230{
231 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
232
233 aesni_enc(ctx, dst, src);
234}
235
236static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
237{
238 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
239
240 aesni_dec(ctx, dst, src);
241}
242
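/*
 * The blkcipher handlers below all follow the same pattern: walk the
 * scatterlists with the blkcipher_walk API and process whole AES blocks
 * inside a single kernel_fpu_begin()/kernel_fpu_end() section.  The
 * CRYPTO_TFM_REQ_MAY_SLEEP flag is cleared first because sleeping is not
 * allowed while the FPU section is held.
 */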
243static int ecb_encrypt(struct blkcipher_desc *desc,
244 struct scatterlist *dst, struct scatterlist *src,
245 unsigned int nbytes)
246{
247 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
248 struct blkcipher_walk walk;
249 int err;
250
251 blkcipher_walk_init(&walk, dst, src, nbytes);
252 err = blkcipher_walk_virt(desc, &walk);
253 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
254
255 kernel_fpu_begin();
256 while ((nbytes = walk.nbytes)) {
257 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
258 nbytes & AES_BLOCK_MASK);
259 nbytes &= AES_BLOCK_SIZE - 1;
260 err = blkcipher_walk_done(desc, &walk, nbytes);
261 }
262 kernel_fpu_end();
263
264 return err;
265}
266
267static int ecb_decrypt(struct blkcipher_desc *desc,
268 struct scatterlist *dst, struct scatterlist *src,
269 unsigned int nbytes)
270{
271 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
272 struct blkcipher_walk walk;
273 int err;
274
275 blkcipher_walk_init(&walk, dst, src, nbytes);
276 err = blkcipher_walk_virt(desc, &walk);
277 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
278
279 kernel_fpu_begin();
280 while ((nbytes = walk.nbytes)) {
281 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
282 nbytes & AES_BLOCK_MASK);
283 nbytes &= AES_BLOCK_SIZE - 1;
284 err = blkcipher_walk_done(desc, &walk, nbytes);
285 }
286 kernel_fpu_end();
287
288 return err;
289}
290
291static int cbc_encrypt(struct blkcipher_desc *desc,
292 struct scatterlist *dst, struct scatterlist *src,
293 unsigned int nbytes)
294{
295 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
296 struct blkcipher_walk walk;
297 int err;
298
299 blkcipher_walk_init(&walk, dst, src, nbytes);
300 err = blkcipher_walk_virt(desc, &walk);
301 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
302
303 kernel_fpu_begin();
304 while ((nbytes = walk.nbytes)) {
305 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
306 nbytes & AES_BLOCK_MASK, walk.iv);
307 nbytes &= AES_BLOCK_SIZE - 1;
308 err = blkcipher_walk_done(desc, &walk, nbytes);
309 }
310 kernel_fpu_end();
311
312 return err;
313}
314
315static int cbc_decrypt(struct blkcipher_desc *desc,
316 struct scatterlist *dst, struct scatterlist *src,
317 unsigned int nbytes)
318{
319 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
320 struct blkcipher_walk walk;
321 int err;
322
323 blkcipher_walk_init(&walk, dst, src, nbytes);
324 err = blkcipher_walk_virt(desc, &walk);
325 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
326
327 kernel_fpu_begin();
328 while ((nbytes = walk.nbytes)) {
329 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
330 nbytes & AES_BLOCK_MASK, walk.iv);
331 nbytes &= AES_BLOCK_SIZE - 1;
332 err = blkcipher_walk_done(desc, &walk, nbytes);
333 }
334 kernel_fpu_end();
335
336 return err;
337}
338
339#ifdef CONFIG_X86_64
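/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block once, XOR only the remaining nbytes of keystream into the
 * output, then bump the counter.
 */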
340static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
341 struct blkcipher_walk *walk)
342{
343 u8 *ctrblk = walk->iv;
344 u8 keystream[AES_BLOCK_SIZE];
345 u8 *src = walk->src.virt.addr;
346 u8 *dst = walk->dst.virt.addr;
347 unsigned int nbytes = walk->nbytes;
348
349 aesni_enc(ctx, keystream, ctrblk);
350 crypto_xor(keystream, src, nbytes);
351 memcpy(dst, keystream, nbytes);
352 crypto_inc(ctrblk, AES_BLOCK_SIZE);
353}
354
355static int ctr_crypt(struct blkcipher_desc *desc,
356 struct scatterlist *dst, struct scatterlist *src,
357 unsigned int nbytes)
358{
359 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
360 struct blkcipher_walk walk;
361 int err;
362
363 blkcipher_walk_init(&walk, dst, src, nbytes);
364 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
365 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
366
367 kernel_fpu_begin();
368 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
369 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
370 nbytes & AES_BLOCK_MASK, walk.iv);
371 nbytes &= AES_BLOCK_SIZE - 1;
372 err = blkcipher_walk_done(desc, &walk, nbytes);
373 }
374 if (walk.nbytes) {
375 ctr_crypt_final(ctx, &walk);
376 err = blkcipher_walk_done(desc, &walk, 0);
377 }
378 kernel_fpu_end();
379
380 return err;
381}
382#endif
383
384static int ablk_ecb_init(struct crypto_tfm *tfm)
385{
386 return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
387}
388
389static int ablk_cbc_init(struct crypto_tfm *tfm)
390{
391 return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
392}
393
394#ifdef CONFIG_X86_64
395static int ablk_ctr_init(struct crypto_tfm *tfm)
396{
397 return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
398}
399
400#endif
401
402#ifdef HAS_PCBC
403static int ablk_pcbc_init(struct crypto_tfm *tfm)
404{
405 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
406}
407#endif
408
409static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
410{
411 aesni_ecb_enc(ctx, blks, blks, nbytes);
412}
413
414static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
415{
416 aesni_ecb_dec(ctx, blks, blks, nbytes);
417}
418
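/*
 * For lrw(aes) the supplied key is the AES key followed by one extra
 * 16-byte block that seeds the LRW tweak table (hence the
 * "+ AES_BLOCK_SIZE" on min/max keysize in the algorithm table below).
 */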
419static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
420 unsigned int keylen)
421{
422 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
423 int err;
424
425 err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
426 keylen - AES_BLOCK_SIZE);
427 if (err)
428 return err;
429
430 return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
431}
432
433static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
434{
435 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
436
437 lrw_free_table(&ctx->lrw_table);
438}
439
440static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
441 struct scatterlist *src, unsigned int nbytes)
442{
443 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
444 be128 buf[8];
445 struct lrw_crypt_req req = {
446 .tbuf = buf,
447 .tbuflen = sizeof(buf),
448
449 .table_ctx = &ctx->lrw_table,
450 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
451 .crypt_fn = lrw_xts_encrypt_callback,
452 };
453 int ret;
454
455 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
456
457 kernel_fpu_begin();
458 ret = lrw_crypt(desc, dst, src, nbytes, &req);
459 kernel_fpu_end();
460
461 return ret;
462}
463
464static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
465 struct scatterlist *src, unsigned int nbytes)
466{
467 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
468 be128 buf[8];
469 struct lrw_crypt_req req = {
470 .tbuf = buf,
471 .tbuflen = sizeof(buf),
472
473 .table_ctx = &ctx->lrw_table,
474 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
475 .crypt_fn = lrw_xts_decrypt_callback,
476 };
477 int ret;
478
479 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
480
481 kernel_fpu_begin();
482 ret = lrw_crypt(desc, dst, src, nbytes, &req);
483 kernel_fpu_end();
484
485 return ret;
486}
487
488static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
489 unsigned int keylen)
490{
491 struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
492 u32 *flags = &tfm->crt_flags;
493 int err;
494
495 /* key consists of keys of equal size concatenated, therefore
496 * the length must be even
497 */
498 if (keylen % 2) {
499 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
500 return -EINVAL;
501 }
502
503 /* first half of xts-key is for crypt */
504 err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
505 if (err)
506 return err;
507
508 /* second half of xts-key is for tweak */
509 return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
510 keylen / 2);
511}
512
513
514static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
515{
516 aesni_enc(ctx, out, in);
517}
518
519#ifdef CONFIG_X86_64
520
521static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
522{
523 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
524}
525
526static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
527{
528 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
529}
530
531static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
532{
533 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
534}
535
536static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
537{
538 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
539}
540
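/*
 * glue_helper dispatch tables for XTS: prefer the eight-blocks-at-a-time
 * assembly routine and fall back to single-block processing for the tail
 * of a request.
 */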
541static const struct common_glue_ctx aesni_enc_xts = {
542 .num_funcs = 2,
543 .fpu_blocks_limit = 1,
544
545 .funcs = { {
546 .num_blocks = 8,
547 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
548 }, {
549 .num_blocks = 1,
550 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
551 } }
552};
553
554static const struct common_glue_ctx aesni_dec_xts = {
555 .num_funcs = 2,
556 .fpu_blocks_limit = 1,
557
558 .funcs = { {
559 .num_blocks = 8,
560 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
561 }, {
562 .num_blocks = 1,
563 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
564 } }
565};
566
567static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
568 struct scatterlist *src, unsigned int nbytes)
569{
570 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
571
572 return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
573 XTS_TWEAK_CAST(aesni_xts_tweak),
574 aes_ctx(ctx->raw_tweak_ctx),
575 aes_ctx(ctx->raw_crypt_ctx));
576}
577
578static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
579 struct scatterlist *src, unsigned int nbytes)
580{
581 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
582
583 return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
584 XTS_TWEAK_CAST(aesni_xts_tweak),
585 aes_ctx(ctx->raw_tweak_ctx),
586 aes_ctx(ctx->raw_crypt_ctx));
587}
588
589#else
590
591static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
592 struct scatterlist *src, unsigned int nbytes)
593{
594 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
595 be128 buf[8];
596 struct xts_crypt_req req = {
597 .tbuf = buf,
598 .tbuflen = sizeof(buf),
599
600 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
601 .tweak_fn = aesni_xts_tweak,
602 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
603 .crypt_fn = lrw_xts_encrypt_callback,
604 };
605 int ret;
606
607 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
608
609 kernel_fpu_begin();
610 ret = xts_crypt(desc, dst, src, nbytes, &req);
611 kernel_fpu_end();
612
613 return ret;
614}
615
616static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
617 struct scatterlist *src, unsigned int nbytes)
618{
619 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
620 be128 buf[8];
621 struct xts_crypt_req req = {
622 .tbuf = buf,
623 .tbuflen = sizeof(buf),
624
625 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
626 .tweak_fn = aesni_xts_tweak,
627 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
628 .crypt_fn = lrw_xts_decrypt_callback,
629 };
630 int ret;
631
632 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
633
634 kernel_fpu_begin();
635 ret = xts_crypt(desc, dst, src, nbytes, &req);
636 kernel_fpu_end();
637
638 return ret;
639}
640
641#endif
642
643#ifdef CONFIG_X86_64
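/*
 * rfc4106(gcm(aes)) is exposed as an async AEAD.  Each outer tfm owns a
 * cryptd-backed "__driver-gcm-aes-aesni" instance so that requests which
 * arrive while the FPU is unusable can be deferred to cryptd instead of
 * being processed inline (see rfc4106_encrypt/rfc4106_decrypt below).
 */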
644static int rfc4106_init(struct crypto_tfm *tfm)
645{
646 struct cryptd_aead *cryptd_tfm;
647 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
648 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
649 struct crypto_aead *cryptd_child;
650 struct aesni_rfc4106_gcm_ctx *child_ctx;
651 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
652 if (IS_ERR(cryptd_tfm))
653 return PTR_ERR(cryptd_tfm);
654
655 cryptd_child = cryptd_aead_child(cryptd_tfm);
656 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
657 memcpy(child_ctx, ctx, sizeof(*ctx));
658 ctx->cryptd_tfm = cryptd_tfm;
659 tfm->crt_aead.reqsize = sizeof(struct aead_request)
660 + crypto_aead_reqsize(&cryptd_tfm->base);
661 return 0;
662}
663
664static void rfc4106_exit(struct crypto_tfm *tfm)
665{
666 struct aesni_rfc4106_gcm_ctx *ctx =
667 (struct aesni_rfc4106_gcm_ctx *)
668 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
669 if (!IS_ERR(ctx->cryptd_tfm))
670 cryptd_free_aead(ctx->cryptd_tfm);
671 return;
672}
673
674static void
675rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
676{
677 struct aesni_gcm_set_hash_subkey_result *result = req->data;
678
679 if (err == -EINPROGRESS)
680 return;
681 result->err = err;
682 complete(&result->completion);
683}
684
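/*
 * Derive the GHASH subkey H = AES-K(0^128) by running a one-off ctr(aes)
 * request over an all-zero block with an all-zero counter block; the
 * result is left in hash_subkey for the assembly GCM routines.
 */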
685static int
686rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
687{
688 struct crypto_ablkcipher *ctr_tfm;
689 struct ablkcipher_request *req;
690 int ret = -EINVAL;
691 struct aesni_hash_subkey_req_data *req_data;
692
693 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
694 if (IS_ERR(ctr_tfm))
695 return PTR_ERR(ctr_tfm);
696
697 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
698
699 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
700 if (ret)
701 goto out_free_ablkcipher;
702
703 ret = -ENOMEM;
704 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
705 if (!req)
706 goto out_free_ablkcipher;
707
708 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
709 if (!req_data)
710 goto out_free_request;
711
712 memset(req_data->iv, 0, sizeof(req_data->iv));
713
714 /* Clear the data in the hash sub key container to zero.*/
715 /* We want to cipher all zeros to create the hash sub key. */
716 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
717
718 init_completion(&req_data->result.completion);
719 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
720 ablkcipher_request_set_tfm(req, ctr_tfm);
721 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
722 CRYPTO_TFM_REQ_MAY_BACKLOG,
723 rfc4106_set_hash_subkey_done,
724 &req_data->result);
725
726 ablkcipher_request_set_crypt(req, &req_data->sg,
727 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
728
729 ret = crypto_ablkcipher_encrypt(req);
730 if (ret == -EINPROGRESS || ret == -EBUSY) {
731 ret = wait_for_completion_interruptible
732 (&req_data->result.completion);
733 if (!ret)
734 ret = req_data->result.err;
735 }
736 kfree(req_data);
737out_free_request:
738 ablkcipher_request_free(req);
739out_free_ablkcipher:
740 crypto_free_ablkcipher(ctr_tfm);
741 return ret;
742}
743
744static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
745 unsigned int key_len)
746{
747 int ret = 0;
748 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
749 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
750 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
751 struct aesni_rfc4106_gcm_ctx *child_ctx =
752 aesni_rfc4106_gcm_ctx_get(cryptd_child);
753 u8 *new_key_align, *new_key_mem = NULL;
754
755 if (key_len < 4) {
756 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
757 return -EINVAL;
758 }
759 /*Account for 4 byte nonce at the end.*/
760 key_len -= 4;
761 if (key_len != AES_KEYSIZE_128) {
762 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
763 return -EINVAL;
764 }
765
766 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
767 /*This must be on a 16 byte boundary!*/
768 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
769 return -EINVAL;
770
771 if ((unsigned long)key % AESNI_ALIGN) {
772 /*key is not aligned: use an auxiliary aligned pointer*/
773 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
774 if (!new_key_mem)
775 return -ENOMEM;
776
777 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
778 memcpy(new_key_align, key, key_len);
779 key = new_key_align;
780 }
781
782 if (!irq_fpu_usable())
783 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
784 key, key_len);
785 else {
786 kernel_fpu_begin();
787 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
788 kernel_fpu_end();
789 }
790 /*This must be on a 16 byte boundary!*/
791 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
792 ret = -EINVAL;
793 goto exit;
794 }
795 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
796 memcpy(child_ctx, ctx, sizeof(*ctx));
797exit:
798 kfree(new_key_mem);
799 return ret;
800}
801
802/* This is the Integrity Check Value (aka the authentication tag) and can
803 * be 8, 12 or 16 bytes long. */
804static int rfc4106_set_authsize(struct crypto_aead *parent,
805 unsigned int authsize)
806{
807 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
808 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
809
810 switch (authsize) {
811 case 8:
812 case 12:
813 case 16:
814 break;
815 default:
816 return -EINVAL;
817 }
818 crypto_aead_crt(parent)->authsize = authsize;
819 crypto_aead_crt(cryptd_child)->authsize = authsize;
820 return 0;
821}
822
823static int rfc4106_encrypt(struct aead_request *req)
824{
825 int ret;
826 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
827 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
828
829 if (!irq_fpu_usable()) {
830 struct aead_request *cryptd_req =
831 (struct aead_request *) aead_request_ctx(req);
832 memcpy(cryptd_req, req, sizeof(*req));
833 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
834 return crypto_aead_encrypt(cryptd_req);
835 } else {
836 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
837 kernel_fpu_begin();
838 ret = cryptd_child->base.crt_aead.encrypt(req);
839 kernel_fpu_end();
840 return ret;
841 }
842}
843
844static int rfc4106_decrypt(struct aead_request *req)
845{
846 int ret;
847 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
848 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
849
850 if (!irq_fpu_usable()) {
851 struct aead_request *cryptd_req =
852 (struct aead_request *) aead_request_ctx(req);
853 memcpy(cryptd_req, req, sizeof(*req));
854 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
855 return crypto_aead_decrypt(cryptd_req);
856 } else {
857 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
858 kernel_fpu_begin();
859 ret = cryptd_child->base.crt_aead.decrypt(req);
860 kernel_fpu_end();
861 return ret;
862 }
863}
864
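/*
 * Synchronous GCM body used by both the direct and the cryptd path.
 * When source, destination and AAD each occupy a single scatterlist entry
 * the pages are mapped and processed in place; otherwise the data is
 * copied into a temporary linear buffer before calling the assembly
 * routine.
 */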
865static int __driver_rfc4106_encrypt(struct aead_request *req)
866{
867 u8 one_entry_in_sg = 0;
868 u8 *src, *dst, *assoc;
869 __be32 counter = cpu_to_be32(1);
870 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
871 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
872 void *aes_ctx = &(ctx->aes_key_expanded);
873 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
874 u8 iv_tab[16+AESNI_ALIGN];
875 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
876 struct scatter_walk src_sg_walk;
877 struct scatter_walk assoc_sg_walk;
878 struct scatter_walk dst_sg_walk;
879 unsigned int i;
880
881 /* Assuming we are supporting rfc4106 64-bit extended */
882 /* sequence numbers, we need to have the AAD length equal */
883 /* to 8 or 12 bytes. */
884 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
885 return -EINVAL;
886 /* Build the pre-counter block j0: 4-byte salt | 8-byte IV | 0x00000001 */
887 for (i = 0; i < 4; i++)
888 *(iv+i) = ctx->nonce[i];
889 for (i = 0; i < 8; i++)
890 *(iv+4+i) = req->iv[i];
891 *((__be32 *)(iv+12)) = counter;
892
893 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
894 one_entry_in_sg = 1;
895 scatterwalk_start(&src_sg_walk, req->src);
896 scatterwalk_start(&assoc_sg_walk, req->assoc);
897 src = scatterwalk_map(&src_sg_walk);
898 assoc = scatterwalk_map(&assoc_sg_walk);
899 dst = src;
900 if (unlikely(req->src != req->dst)) {
901 scatterwalk_start(&dst_sg_walk, req->dst);
902 dst = scatterwalk_map(&dst_sg_walk);
903 }
904
905 } else {
906 /* Allocate memory for src, dst, assoc */
907 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
908 GFP_ATOMIC);
909 if (unlikely(!src))
910 return -ENOMEM;
911 assoc = (src + req->cryptlen + auth_tag_len);
912 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
913 scatterwalk_map_and_copy(assoc, req->assoc, 0,
914 req->assoclen, 0);
915 dst = src;
916 }
917
918 aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
919 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
920 + ((unsigned long)req->cryptlen), auth_tag_len);
921
922 /* The authTag (aka the Integrity Check Value) needs to be written
923 * back to the packet. */
924 if (one_entry_in_sg) {
925 if (unlikely(req->src != req->dst)) {
926 scatterwalk_unmap(dst);
927 scatterwalk_done(&dst_sg_walk, 0, 0);
928 }
929 scatterwalk_unmap(src);
930 scatterwalk_unmap(assoc);
931 scatterwalk_done(&src_sg_walk, 0, 0);
932 scatterwalk_done(&assoc_sg_walk, 0, 0);
933 } else {
934 scatterwalk_map_and_copy(dst, req->dst, 0,
935 req->cryptlen + auth_tag_len, 1);
936 kfree(src);
937 }
938 return 0;
939}
940
941static int __driver_rfc4106_decrypt(struct aead_request *req)
942{
943 u8 one_entry_in_sg = 0;
944 u8 *src, *dst, *assoc;
945 unsigned long tempCipherLen = 0;
946 __be32 counter = cpu_to_be32(1);
947 int retval = 0;
948 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
949 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
950 void *aes_ctx = &(ctx->aes_key_expanded);
951 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
952 u8 iv_and_authTag[32+AESNI_ALIGN];
953 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
954 u8 *authTag = iv + 16;
955 struct scatter_walk src_sg_walk;
956 struct scatter_walk assoc_sg_walk;
957 struct scatter_walk dst_sg_walk;
958 unsigned int i;
959
960 if (unlikely((req->cryptlen < auth_tag_len) ||
961 (req->assoclen != 8 && req->assoclen != 12)))
962 return -EINVAL;
963 /* Assuming we are supporting rfc4106 64-bit extended */
964 /* sequence numbers, we need to have the AAD length */
965 /* equal to 8 or 12 bytes. */
966
967 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
968 /* Build the pre-counter block j0: 4-byte salt | 8-byte IV | 0x00000001 */
969 for (i = 0; i < 4; i++)
970 *(iv+i) = ctx->nonce[i];
971 for (i = 0; i < 8; i++)
972 *(iv+4+i) = req->iv[i];
973 *((__be32 *)(iv+12)) = counter;
974
975 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
976 one_entry_in_sg = 1;
977 scatterwalk_start(&src_sg_walk, req->src);
978 scatterwalk_start(&assoc_sg_walk, req->assoc);
979 src = scatterwalk_map(&src_sg_walk);
980 assoc = scatterwalk_map(&assoc_sg_walk);
981 dst = src;
982 if (unlikely(req->src != req->dst)) {
983 scatterwalk_start(&dst_sg_walk, req->dst);
984 dst = scatterwalk_map(&dst_sg_walk);
985 }
986
987 } else {
988 /* Allocate memory for src, dst, assoc */
989 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
990 if (!src)
991 return -ENOMEM;
992 assoc = (src + req->cryptlen + auth_tag_len);
993 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
994 scatterwalk_map_and_copy(assoc, req->assoc, 0,
995 req->assoclen, 0);
996 dst = src;
997 }
998
999 aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1000 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1001 authTag, auth_tag_len);
1002
1003 /* Compare generated tag with passed in tag. */
1004 retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1005 -EBADMSG : 0;
1006
1007 if (one_entry_in_sg) {
1008 if (unlikely(req->src != req->dst)) {
1009 scatterwalk_unmap(dst);
1010 scatterwalk_done(&dst_sg_walk, 0, 0);
1011 }
1012 scatterwalk_unmap(src);
1013 scatterwalk_unmap(assoc);
1014 scatterwalk_done(&src_sg_walk, 0, 0);
1015 scatterwalk_done(&assoc_sg_walk, 0, 0);
1016 } else {
1017 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1018 kfree(src);
1019 }
1020 return retval;
1021}
1022#endif
1023
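/*
 * Algorithm table: the zero-priority "__driver-*" / "__aes-aesni" entries
 * are internal helpers (reached via cryptd and the ablk_helper/fpu
 * wrappers), while the priority-400 entries are the async front ends that
 * callers of "ecb(aes)", "cbc(aes)", etc. actually get.
 */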
1024static struct crypto_alg aesni_algs[] = { {
1025 .cra_name = "aes",
1026 .cra_driver_name = "aes-aesni",
1027 .cra_priority = 300,
1028 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1029 .cra_blocksize = AES_BLOCK_SIZE,
1030 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1031 AESNI_ALIGN - 1,
1032 .cra_alignmask = 0,
1033 .cra_module = THIS_MODULE,
1034 .cra_u = {
1035 .cipher = {
1036 .cia_min_keysize = AES_MIN_KEY_SIZE,
1037 .cia_max_keysize = AES_MAX_KEY_SIZE,
1038 .cia_setkey = aes_set_key,
1039 .cia_encrypt = aes_encrypt,
1040 .cia_decrypt = aes_decrypt
1041 }
1042 }
1043}, {
1044 .cra_name = "__aes-aesni",
1045 .cra_driver_name = "__driver-aes-aesni",
1046 .cra_priority = 0,
1047 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1048 .cra_blocksize = AES_BLOCK_SIZE,
1049 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1050 AESNI_ALIGN - 1,
1051 .cra_alignmask = 0,
1052 .cra_module = THIS_MODULE,
1053 .cra_u = {
1054 .cipher = {
1055 .cia_min_keysize = AES_MIN_KEY_SIZE,
1056 .cia_max_keysize = AES_MAX_KEY_SIZE,
1057 .cia_setkey = aes_set_key,
1058 .cia_encrypt = __aes_encrypt,
1059 .cia_decrypt = __aes_decrypt
1060 }
1061 }
1062}, {
1063 .cra_name = "__ecb-aes-aesni",
1064 .cra_driver_name = "__driver-ecb-aes-aesni",
1065 .cra_priority = 0,
1066 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1067 .cra_blocksize = AES_BLOCK_SIZE,
1068 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1069 AESNI_ALIGN - 1,
1070 .cra_alignmask = 0,
1071 .cra_type = &crypto_blkcipher_type,
1072 .cra_module = THIS_MODULE,
1073 .cra_u = {
1074 .blkcipher = {
1075 .min_keysize = AES_MIN_KEY_SIZE,
1076 .max_keysize = AES_MAX_KEY_SIZE,
1077 .setkey = aes_set_key,
1078 .encrypt = ecb_encrypt,
1079 .decrypt = ecb_decrypt,
1080 },
1081 },
1082}, {
1083 .cra_name = "__cbc-aes-aesni",
1084 .cra_driver_name = "__driver-cbc-aes-aesni",
1085 .cra_priority = 0,
1086 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1087 .cra_blocksize = AES_BLOCK_SIZE,
1088 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1089 AESNI_ALIGN - 1,
1090 .cra_alignmask = 0,
1091 .cra_type = &crypto_blkcipher_type,
1092 .cra_module = THIS_MODULE,
1093 .cra_u = {
1094 .blkcipher = {
1095 .min_keysize = AES_MIN_KEY_SIZE,
1096 .max_keysize = AES_MAX_KEY_SIZE,
1097 .setkey = aes_set_key,
1098 .encrypt = cbc_encrypt,
1099 .decrypt = cbc_decrypt,
1100 },
1101 },
1102}, {
1103 .cra_name = "ecb(aes)",
1104 .cra_driver_name = "ecb-aes-aesni",
1105 .cra_priority = 400,
1106 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1107 .cra_blocksize = AES_BLOCK_SIZE,
1108 .cra_ctxsize = sizeof(struct async_helper_ctx),
1109 .cra_alignmask = 0,
1110 .cra_type = &crypto_ablkcipher_type,
1111 .cra_module = THIS_MODULE,
1112 .cra_init = ablk_ecb_init,
1113 .cra_exit = ablk_exit,
1114 .cra_u = {
1115 .ablkcipher = {
1116 .min_keysize = AES_MIN_KEY_SIZE,
1117 .max_keysize = AES_MAX_KEY_SIZE,
1118 .setkey = ablk_set_key,
1119 .encrypt = ablk_encrypt,
1120 .decrypt = ablk_decrypt,
1121 },
1122 },
1123}, {
1124 .cra_name = "cbc(aes)",
1125 .cra_driver_name = "cbc-aes-aesni",
1126 .cra_priority = 400,
1127 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1128 .cra_blocksize = AES_BLOCK_SIZE,
1129 .cra_ctxsize = sizeof(struct async_helper_ctx),
1130 .cra_alignmask = 0,
1131 .cra_type = &crypto_ablkcipher_type,
1132 .cra_module = THIS_MODULE,
1133 .cra_init = ablk_cbc_init,
1134 .cra_exit = ablk_exit,
1135 .cra_u = {
1136 .ablkcipher = {
1137 .min_keysize = AES_MIN_KEY_SIZE,
1138 .max_keysize = AES_MAX_KEY_SIZE,
1139 .ivsize = AES_BLOCK_SIZE,
1140 .setkey = ablk_set_key,
1141 .encrypt = ablk_encrypt,
1142 .decrypt = ablk_decrypt,
1143 },
1144 },
1145#ifdef CONFIG_X86_64
1146}, {
1147 .cra_name = "__ctr-aes-aesni",
1148 .cra_driver_name = "__driver-ctr-aes-aesni",
1149 .cra_priority = 0,
1150 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1151 .cra_blocksize = 1,
1152 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1153 AESNI_ALIGN - 1,
1154 .cra_alignmask = 0,
1155 .cra_type = &crypto_blkcipher_type,
1156 .cra_module = THIS_MODULE,
1157 .cra_u = {
1158 .blkcipher = {
1159 .min_keysize = AES_MIN_KEY_SIZE,
1160 .max_keysize = AES_MAX_KEY_SIZE,
1161 .ivsize = AES_BLOCK_SIZE,
1162 .setkey = aes_set_key,
1163 .encrypt = ctr_crypt,
1164 .decrypt = ctr_crypt,
1165 },
1166 },
1167}, {
1168 .cra_name = "ctr(aes)",
1169 .cra_driver_name = "ctr-aes-aesni",
1170 .cra_priority = 400,
1171 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1172 .cra_blocksize = 1,
1173 .cra_ctxsize = sizeof(struct async_helper_ctx),
1174 .cra_alignmask = 0,
1175 .cra_type = &crypto_ablkcipher_type,
1176 .cra_module = THIS_MODULE,
1177 .cra_init = ablk_ctr_init,
1178 .cra_exit = ablk_exit,
1179 .cra_u = {
1180 .ablkcipher = {
1181 .min_keysize = AES_MIN_KEY_SIZE,
1182 .max_keysize = AES_MAX_KEY_SIZE,
1183 .ivsize = AES_BLOCK_SIZE,
1184 .setkey = ablk_set_key,
1185 .encrypt = ablk_encrypt,
1186 .decrypt = ablk_encrypt,
1187 .geniv = "chainiv",
1188 },
1189 },
1190}, {
1191 .cra_name = "__gcm-aes-aesni",
1192 .cra_driver_name = "__driver-gcm-aes-aesni",
1193 .cra_priority = 0,
1194 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1195 .cra_blocksize = 1,
1196 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1197 AESNI_ALIGN,
1198 .cra_alignmask = 0,
1199 .cra_type = &crypto_aead_type,
1200 .cra_module = THIS_MODULE,
1201 .cra_u = {
1202 .aead = {
1203 .encrypt = __driver_rfc4106_encrypt,
1204 .decrypt = __driver_rfc4106_decrypt,
1205 },
1206 },
1207}, {
1208 .cra_name = "rfc4106(gcm(aes))",
1209 .cra_driver_name = "rfc4106-gcm-aesni",
1210 .cra_priority = 400,
1211 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1212 .cra_blocksize = 1,
1213 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1214 AESNI_ALIGN,
1215 .cra_alignmask = 0,
1216 .cra_type = &crypto_nivaead_type,
1217 .cra_module = THIS_MODULE,
1218 .cra_init = rfc4106_init,
1219 .cra_exit = rfc4106_exit,
1220 .cra_u = {
1221 .aead = {
1222 .setkey = rfc4106_set_key,
1223 .setauthsize = rfc4106_set_authsize,
1224 .encrypt = rfc4106_encrypt,
1225 .decrypt = rfc4106_decrypt,
1226 .geniv = "seqiv",
1227 .ivsize = 8,
1228 .maxauthsize = 16,
1229 },
1230 },
1231#endif
1232#ifdef HAS_PCBC
1233}, {
1234 .cra_name = "pcbc(aes)",
1235 .cra_driver_name = "pcbc-aes-aesni",
1236 .cra_priority = 400,
1237 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1238 .cra_blocksize = AES_BLOCK_SIZE,
1239 .cra_ctxsize = sizeof(struct async_helper_ctx),
1240 .cra_alignmask = 0,
1241 .cra_type = &crypto_ablkcipher_type,
1242 .cra_module = THIS_MODULE,
1243 .cra_init = ablk_pcbc_init,
1244 .cra_exit = ablk_exit,
1245 .cra_u = {
1246 .ablkcipher = {
1247 .min_keysize = AES_MIN_KEY_SIZE,
1248 .max_keysize = AES_MAX_KEY_SIZE,
1249 .ivsize = AES_BLOCK_SIZE,
1250 .setkey = ablk_set_key,
1251 .encrypt = ablk_encrypt,
1252 .decrypt = ablk_decrypt,
1253 },
1254 },
1255#endif
1256}, {
1257 .cra_name = "__lrw-aes-aesni",
1258 .cra_driver_name = "__driver-lrw-aes-aesni",
1259 .cra_priority = 0,
1260 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1261 .cra_blocksize = AES_BLOCK_SIZE,
1262 .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
1263 .cra_alignmask = 0,
1264 .cra_type = &crypto_blkcipher_type,
1265 .cra_module = THIS_MODULE,
1266 .cra_exit = lrw_aesni_exit_tfm,
1267 .cra_u = {
1268 .blkcipher = {
1269 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1270 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1271 .ivsize = AES_BLOCK_SIZE,
1272 .setkey = lrw_aesni_setkey,
1273 .encrypt = lrw_encrypt,
1274 .decrypt = lrw_decrypt,
1275 },
1276 },
1277}, {
1278 .cra_name = "__xts-aes-aesni",
1279 .cra_driver_name = "__driver-xts-aes-aesni",
1280 .cra_priority = 0,
1281 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1282 .cra_blocksize = AES_BLOCK_SIZE,
1283 .cra_ctxsize = sizeof(struct aesni_xts_ctx),
1284 .cra_alignmask = 0,
1285 .cra_type = &crypto_blkcipher_type,
1286 .cra_module = THIS_MODULE,
1287 .cra_u = {
1288 .blkcipher = {
1289 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1290 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1291 .ivsize = AES_BLOCK_SIZE,
1292 .setkey = xts_aesni_setkey,
1293 .encrypt = xts_encrypt,
1294 .decrypt = xts_decrypt,
1295 },
1296 },
1297}, {
1298 .cra_name = "lrw(aes)",
1299 .cra_driver_name = "lrw-aes-aesni",
1300 .cra_priority = 400,
1301 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1302 .cra_blocksize = AES_BLOCK_SIZE,
1303 .cra_ctxsize = sizeof(struct async_helper_ctx),
1304 .cra_alignmask = 0,
1305 .cra_type = &crypto_ablkcipher_type,
1306 .cra_module = THIS_MODULE,
1307 .cra_init = ablk_init,
1308 .cra_exit = ablk_exit,
1309 .cra_u = {
1310 .ablkcipher = {
1311 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1312 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1313 .ivsize = AES_BLOCK_SIZE,
1314 .setkey = ablk_set_key,
1315 .encrypt = ablk_encrypt,
1316 .decrypt = ablk_decrypt,
1317 },
1318 },
1319}, {
1320 .cra_name = "xts(aes)",
1321 .cra_driver_name = "xts-aes-aesni",
1322 .cra_priority = 400,
1323 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1324 .cra_blocksize = AES_BLOCK_SIZE,
1325 .cra_ctxsize = sizeof(struct async_helper_ctx),
1326 .cra_alignmask = 0,
1327 .cra_type = &crypto_ablkcipher_type,
1328 .cra_module = THIS_MODULE,
1329 .cra_init = ablk_init,
1330 .cra_exit = ablk_exit,
1331 .cra_u = {
1332 .ablkcipher = {
1333 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1334 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1335 .ivsize = AES_BLOCK_SIZE,
1336 .setkey = ablk_set_key,
1337 .encrypt = ablk_encrypt,
1338 .decrypt = ablk_decrypt,
1339 },
1340 },
1341} };
1342
1343
1344static const struct x86_cpu_id aesni_cpu_id[] = {
1345 X86_FEATURE_MATCH(X86_FEATURE_AES),
1346 {}
1347};
1348MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1349
1350static int __init aesni_init(void)
1351{
1352 int err;
1353
1354 if (!x86_match_cpu(aesni_cpu_id))
1355 return -ENODEV;
1356
1357 err = crypto_fpu_init();
1358 if (err)
1359 return err;
1360
1361 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1362}
1363
1364static void __exit aesni_exit(void)
1365{
1366 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1367
1368 crypto_fpu_exit();
1369}
1370
1371module_init(aesni_init);
1372module_exit(aesni_exit);
1373
1374MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1375MODULE_LICENSE("GPL");
1376MODULE_ALIAS("aes");