/*
 * Glue Code for SSE2 assembler versions of Serpent Cipher
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Glue code based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
32 #include <linux/module.h>
33 #include <linux/hardirq.h>
34 #include <linux/types.h>
35 #include <linux/crypto.h>
36 #include <linux/err.h>
37 #include <crypto/algapi.h>
38 #include <crypto/serpent.h>
39 #include <crypto/cryptd.h>
40 #include <crypto/b128ops.h>
41 #include <crypto/ctr.h>
42 #include <crypto/lrw.h>
44 #include <asm/serpent.h>
45 #include <crypto/scatterwalk.h>
46 #include <linux/workqueue.h>
47 #include <linux/spinlock.h>
/* LRW wrappers are only useful when the LRW template itself is enabled. */
#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif
/* tfm context for the async ("serpent-sse2") wrappers: all real work is
 * delegated to an inner cryptd-backed blkcipher transform.
 */
struct async_serpent_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};
57 static inline bool serpent_fpu_begin(bool fpu_enabled
, unsigned int nbytes
)
62 /* SSE2 is only used when chunk to be processed is large enough, so
63 * do not enable FPU until it is necessary.
65 if (nbytes
< SERPENT_BLOCK_SIZE
* SERPENT_PARALLEL_BLOCKS
)
72 static inline void serpent_fpu_end(bool fpu_enabled
)
78 static int ecb_crypt(struct blkcipher_desc
*desc
, struct blkcipher_walk
*walk
,
81 bool fpu_enabled
= false;
82 struct serpent_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
83 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
87 err
= blkcipher_walk_virt(desc
, walk
);
88 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
90 while ((nbytes
= walk
->nbytes
)) {
91 u8
*wsrc
= walk
->src
.virt
.addr
;
92 u8
*wdst
= walk
->dst
.virt
.addr
;
94 fpu_enabled
= serpent_fpu_begin(fpu_enabled
, nbytes
);
96 /* Process multi-block batch */
97 if (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
) {
100 serpent_enc_blk_xway(ctx
, wdst
, wsrc
);
102 serpent_dec_blk_xway(ctx
, wdst
, wsrc
);
104 wsrc
+= bsize
* SERPENT_PARALLEL_BLOCKS
;
105 wdst
+= bsize
* SERPENT_PARALLEL_BLOCKS
;
106 nbytes
-= bsize
* SERPENT_PARALLEL_BLOCKS
;
107 } while (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
);
113 /* Handle leftovers */
116 __serpent_encrypt(ctx
, wdst
, wsrc
);
118 __serpent_decrypt(ctx
, wdst
, wsrc
);
123 } while (nbytes
>= bsize
);
126 err
= blkcipher_walk_done(desc
, walk
, nbytes
);
129 serpent_fpu_end(fpu_enabled
);
133 static int ecb_encrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
134 struct scatterlist
*src
, unsigned int nbytes
)
136 struct blkcipher_walk walk
;
138 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
139 return ecb_crypt(desc
, &walk
, true);
142 static int ecb_decrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
143 struct scatterlist
*src
, unsigned int nbytes
)
145 struct blkcipher_walk walk
;
147 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
148 return ecb_crypt(desc
, &walk
, false);
151 static struct crypto_alg blk_ecb_alg
= {
152 .cra_name
= "__ecb-serpent-sse2",
153 .cra_driver_name
= "__driver-ecb-serpent-sse2",
155 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
156 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
157 .cra_ctxsize
= sizeof(struct serpent_ctx
),
159 .cra_type
= &crypto_blkcipher_type
,
160 .cra_module
= THIS_MODULE
,
161 .cra_list
= LIST_HEAD_INIT(blk_ecb_alg
.cra_list
),
164 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
165 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
166 .setkey
= serpent_setkey
,
167 .encrypt
= ecb_encrypt
,
168 .decrypt
= ecb_decrypt
,
173 static unsigned int __cbc_encrypt(struct blkcipher_desc
*desc
,
174 struct blkcipher_walk
*walk
)
176 struct serpent_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
177 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
178 unsigned int nbytes
= walk
->nbytes
;
179 u128
*src
= (u128
*)walk
->src
.virt
.addr
;
180 u128
*dst
= (u128
*)walk
->dst
.virt
.addr
;
181 u128
*iv
= (u128
*)walk
->iv
;
184 u128_xor(dst
, src
, iv
);
185 __serpent_encrypt(ctx
, (u8
*)dst
, (u8
*)dst
);
191 } while (nbytes
>= bsize
);
193 u128_xor((u128
*)walk
->iv
, (u128
*)walk
->iv
, iv
);
197 static int cbc_encrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
198 struct scatterlist
*src
, unsigned int nbytes
)
200 struct blkcipher_walk walk
;
203 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
204 err
= blkcipher_walk_virt(desc
, &walk
);
206 while ((nbytes
= walk
.nbytes
)) {
207 nbytes
= __cbc_encrypt(desc
, &walk
);
208 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
214 static unsigned int __cbc_decrypt(struct blkcipher_desc
*desc
,
215 struct blkcipher_walk
*walk
)
217 struct serpent_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
218 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
219 unsigned int nbytes
= walk
->nbytes
;
220 u128
*src
= (u128
*)walk
->src
.virt
.addr
;
221 u128
*dst
= (u128
*)walk
->dst
.virt
.addr
;
222 u128 ivs
[SERPENT_PARALLEL_BLOCKS
- 1];
226 /* Start of the last block. */
227 src
+= nbytes
/ bsize
- 1;
228 dst
+= nbytes
/ bsize
- 1;
232 /* Process multi-block batch */
233 if (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
) {
235 nbytes
-= bsize
* (SERPENT_PARALLEL_BLOCKS
- 1);
236 src
-= SERPENT_PARALLEL_BLOCKS
- 1;
237 dst
-= SERPENT_PARALLEL_BLOCKS
- 1;
239 for (i
= 0; i
< SERPENT_PARALLEL_BLOCKS
- 1; i
++)
242 serpent_dec_blk_xway(ctx
, (u8
*)dst
, (u8
*)src
);
244 for (i
= 0; i
< SERPENT_PARALLEL_BLOCKS
- 1; i
++)
245 u128_xor(dst
+ (i
+ 1), dst
+ (i
+ 1), ivs
+ i
);
251 u128_xor(dst
, dst
, src
- 1);
254 } while (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
);
260 /* Handle leftovers */
262 __serpent_decrypt(ctx
, (u8
*)dst
, (u8
*)src
);
268 u128_xor(dst
, dst
, src
- 1);
274 u128_xor(dst
, dst
, (u128
*)walk
->iv
);
275 *(u128
*)walk
->iv
= last_iv
;
280 static int cbc_decrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
281 struct scatterlist
*src
, unsigned int nbytes
)
283 bool fpu_enabled
= false;
284 struct blkcipher_walk walk
;
287 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
288 err
= blkcipher_walk_virt(desc
, &walk
);
289 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
291 while ((nbytes
= walk
.nbytes
)) {
292 fpu_enabled
= serpent_fpu_begin(fpu_enabled
, nbytes
);
293 nbytes
= __cbc_decrypt(desc
, &walk
);
294 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
297 serpent_fpu_end(fpu_enabled
);
301 static struct crypto_alg blk_cbc_alg
= {
302 .cra_name
= "__cbc-serpent-sse2",
303 .cra_driver_name
= "__driver-cbc-serpent-sse2",
305 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
306 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
307 .cra_ctxsize
= sizeof(struct serpent_ctx
),
309 .cra_type
= &crypto_blkcipher_type
,
310 .cra_module
= THIS_MODULE
,
311 .cra_list
= LIST_HEAD_INIT(blk_cbc_alg
.cra_list
),
314 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
315 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
316 .setkey
= serpent_setkey
,
317 .encrypt
= cbc_encrypt
,
318 .decrypt
= cbc_decrypt
,
323 static inline void u128_to_be128(be128
*dst
, const u128
*src
)
325 dst
->a
= cpu_to_be64(src
->a
);
326 dst
->b
= cpu_to_be64(src
->b
);
329 static inline void be128_to_u128(u128
*dst
, const be128
*src
)
331 dst
->a
= be64_to_cpu(src
->a
);
332 dst
->b
= be64_to_cpu(src
->b
);
335 static inline void u128_inc(u128
*i
)
342 static void ctr_crypt_final(struct blkcipher_desc
*desc
,
343 struct blkcipher_walk
*walk
)
345 struct serpent_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
346 u8
*ctrblk
= walk
->iv
;
347 u8 keystream
[SERPENT_BLOCK_SIZE
];
348 u8
*src
= walk
->src
.virt
.addr
;
349 u8
*dst
= walk
->dst
.virt
.addr
;
350 unsigned int nbytes
= walk
->nbytes
;
352 __serpent_encrypt(ctx
, keystream
, ctrblk
);
353 crypto_xor(keystream
, src
, nbytes
);
354 memcpy(dst
, keystream
, nbytes
);
356 crypto_inc(ctrblk
, SERPENT_BLOCK_SIZE
);
359 static unsigned int __ctr_crypt(struct blkcipher_desc
*desc
,
360 struct blkcipher_walk
*walk
)
362 struct serpent_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
363 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
364 unsigned int nbytes
= walk
->nbytes
;
365 u128
*src
= (u128
*)walk
->src
.virt
.addr
;
366 u128
*dst
= (u128
*)walk
->dst
.virt
.addr
;
368 be128 ctrblocks
[SERPENT_PARALLEL_BLOCKS
];
371 be128_to_u128(&ctrblk
, (be128
*)walk
->iv
);
373 /* Process multi-block batch */
374 if (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
) {
376 /* create ctrblks for parallel encrypt */
377 for (i
= 0; i
< SERPENT_PARALLEL_BLOCKS
; i
++) {
381 u128_to_be128(&ctrblocks
[i
], &ctrblk
);
385 serpent_enc_blk_xway_xor(ctx
, (u8
*)dst
,
388 src
+= SERPENT_PARALLEL_BLOCKS
;
389 dst
+= SERPENT_PARALLEL_BLOCKS
;
390 nbytes
-= bsize
* SERPENT_PARALLEL_BLOCKS
;
391 } while (nbytes
>= bsize
* SERPENT_PARALLEL_BLOCKS
);
397 /* Handle leftovers */
402 u128_to_be128(&ctrblocks
[0], &ctrblk
);
405 __serpent_encrypt(ctx
, (u8
*)ctrblocks
, (u8
*)ctrblocks
);
406 u128_xor(dst
, dst
, (u128
*)ctrblocks
);
411 } while (nbytes
>= bsize
);
414 u128_to_be128((be128
*)walk
->iv
, &ctrblk
);
418 static int ctr_crypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
419 struct scatterlist
*src
, unsigned int nbytes
)
421 bool fpu_enabled
= false;
422 struct blkcipher_walk walk
;
425 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
426 err
= blkcipher_walk_virt_block(desc
, &walk
, SERPENT_BLOCK_SIZE
);
427 desc
->flags
&= ~CRYPTO_TFM_REQ_MAY_SLEEP
;
429 while ((nbytes
= walk
.nbytes
) >= SERPENT_BLOCK_SIZE
) {
430 fpu_enabled
= serpent_fpu_begin(fpu_enabled
, nbytes
);
431 nbytes
= __ctr_crypt(desc
, &walk
);
432 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
435 serpent_fpu_end(fpu_enabled
);
438 ctr_crypt_final(desc
, &walk
);
439 err
= blkcipher_walk_done(desc
, &walk
, 0);
445 static struct crypto_alg blk_ctr_alg
= {
446 .cra_name
= "__ctr-serpent-sse2",
447 .cra_driver_name
= "__driver-ctr-serpent-sse2",
449 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
451 .cra_ctxsize
= sizeof(struct serpent_ctx
),
453 .cra_type
= &crypto_blkcipher_type
,
454 .cra_module
= THIS_MODULE
,
455 .cra_list
= LIST_HEAD_INIT(blk_ctr_alg
.cra_list
),
458 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
459 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
460 .ivsize
= SERPENT_BLOCK_SIZE
,
461 .setkey
= serpent_setkey
,
462 .encrypt
= ctr_crypt
,
463 .decrypt
= ctr_crypt
,
471 struct serpent_ctx
*ctx
;
475 static void encrypt_callback(void *priv
, u8
*srcdst
, unsigned int nbytes
)
477 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
478 struct crypt_priv
*ctx
= priv
;
481 ctx
->fpu_enabled
= serpent_fpu_begin(ctx
->fpu_enabled
, nbytes
);
483 if (nbytes
== bsize
* SERPENT_PARALLEL_BLOCKS
) {
484 serpent_enc_blk_xway(ctx
->ctx
, srcdst
, srcdst
);
488 for (i
= 0; i
< nbytes
/ bsize
; i
++, srcdst
+= bsize
)
489 __serpent_encrypt(ctx
->ctx
, srcdst
, srcdst
);
492 static void decrypt_callback(void *priv
, u8
*srcdst
, unsigned int nbytes
)
494 const unsigned int bsize
= SERPENT_BLOCK_SIZE
;
495 struct crypt_priv
*ctx
= priv
;
498 ctx
->fpu_enabled
= serpent_fpu_begin(ctx
->fpu_enabled
, nbytes
);
500 if (nbytes
== bsize
* SERPENT_PARALLEL_BLOCKS
) {
501 serpent_dec_blk_xway(ctx
->ctx
, srcdst
, srcdst
);
505 for (i
= 0; i
< nbytes
/ bsize
; i
++, srcdst
+= bsize
)
506 __serpent_decrypt(ctx
->ctx
, srcdst
, srcdst
);
509 struct serpent_lrw_ctx
{
510 struct lrw_table_ctx lrw_table
;
511 struct serpent_ctx serpent_ctx
;
514 static int lrw_serpent_setkey(struct crypto_tfm
*tfm
, const u8
*key
,
517 struct serpent_lrw_ctx
*ctx
= crypto_tfm_ctx(tfm
);
520 err
= __serpent_setkey(&ctx
->serpent_ctx
, key
, keylen
-
525 return lrw_init_table(&ctx
->lrw_table
, key
+ keylen
-
529 static int lrw_encrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
530 struct scatterlist
*src
, unsigned int nbytes
)
532 struct serpent_lrw_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
533 be128 buf
[SERPENT_PARALLEL_BLOCKS
];
534 struct crypt_priv crypt_ctx
= {
535 .ctx
= &ctx
->serpent_ctx
,
536 .fpu_enabled
= false,
538 struct lrw_crypt_req req
= {
540 .tbuflen
= sizeof(buf
),
542 .table_ctx
= &ctx
->lrw_table
,
543 .crypt_ctx
= &crypt_ctx
,
544 .crypt_fn
= encrypt_callback
,
548 ret
= lrw_crypt(desc
, dst
, src
, nbytes
, &req
);
549 serpent_fpu_end(crypt_ctx
.fpu_enabled
);
554 static int lrw_decrypt(struct blkcipher_desc
*desc
, struct scatterlist
*dst
,
555 struct scatterlist
*src
, unsigned int nbytes
)
557 struct serpent_lrw_ctx
*ctx
= crypto_blkcipher_ctx(desc
->tfm
);
558 be128 buf
[SERPENT_PARALLEL_BLOCKS
];
559 struct crypt_priv crypt_ctx
= {
560 .ctx
= &ctx
->serpent_ctx
,
561 .fpu_enabled
= false,
563 struct lrw_crypt_req req
= {
565 .tbuflen
= sizeof(buf
),
567 .table_ctx
= &ctx
->lrw_table
,
568 .crypt_ctx
= &crypt_ctx
,
569 .crypt_fn
= decrypt_callback
,
573 ret
= lrw_crypt(desc
, dst
, src
, nbytes
, &req
);
574 serpent_fpu_end(crypt_ctx
.fpu_enabled
);
579 static void lrw_exit_tfm(struct crypto_tfm
*tfm
)
581 struct serpent_lrw_ctx
*ctx
= crypto_tfm_ctx(tfm
);
583 lrw_free_table(&ctx
->lrw_table
);
586 static struct crypto_alg blk_lrw_alg
= {
587 .cra_name
= "__lrw-serpent-sse2",
588 .cra_driver_name
= "__driver-lrw-serpent-sse2",
590 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
,
591 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
592 .cra_ctxsize
= sizeof(struct serpent_lrw_ctx
),
594 .cra_type
= &crypto_blkcipher_type
,
595 .cra_module
= THIS_MODULE
,
596 .cra_list
= LIST_HEAD_INIT(blk_lrw_alg
.cra_list
),
597 .cra_exit
= lrw_exit_tfm
,
600 .min_keysize
= SERPENT_MIN_KEY_SIZE
+
602 .max_keysize
= SERPENT_MAX_KEY_SIZE
+
604 .ivsize
= SERPENT_BLOCK_SIZE
,
605 .setkey
= lrw_serpent_setkey
,
606 .encrypt
= lrw_encrypt
,
607 .decrypt
= lrw_decrypt
,
614 static int ablk_set_key(struct crypto_ablkcipher
*tfm
, const u8
*key
,
615 unsigned int key_len
)
617 struct async_serpent_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
618 struct crypto_ablkcipher
*child
= &ctx
->cryptd_tfm
->base
;
621 crypto_ablkcipher_clear_flags(child
, CRYPTO_TFM_REQ_MASK
);
622 crypto_ablkcipher_set_flags(child
, crypto_ablkcipher_get_flags(tfm
)
623 & CRYPTO_TFM_REQ_MASK
);
624 err
= crypto_ablkcipher_setkey(child
, key
, key_len
);
625 crypto_ablkcipher_set_flags(tfm
, crypto_ablkcipher_get_flags(child
)
626 & CRYPTO_TFM_RES_MASK
);
630 static int __ablk_encrypt(struct ablkcipher_request
*req
)
632 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
633 struct async_serpent_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
634 struct blkcipher_desc desc
;
636 desc
.tfm
= cryptd_ablkcipher_child(ctx
->cryptd_tfm
);
637 desc
.info
= req
->info
;
640 return crypto_blkcipher_crt(desc
.tfm
)->encrypt(
641 &desc
, req
->dst
, req
->src
, req
->nbytes
);
644 static int ablk_encrypt(struct ablkcipher_request
*req
)
646 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
647 struct async_serpent_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
649 if (!irq_fpu_usable()) {
650 struct ablkcipher_request
*cryptd_req
=
651 ablkcipher_request_ctx(req
);
653 memcpy(cryptd_req
, req
, sizeof(*req
));
654 ablkcipher_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
656 return crypto_ablkcipher_encrypt(cryptd_req
);
658 return __ablk_encrypt(req
);
662 static int ablk_decrypt(struct ablkcipher_request
*req
)
664 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
665 struct async_serpent_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
667 if (!irq_fpu_usable()) {
668 struct ablkcipher_request
*cryptd_req
=
669 ablkcipher_request_ctx(req
);
671 memcpy(cryptd_req
, req
, sizeof(*req
));
672 ablkcipher_request_set_tfm(cryptd_req
, &ctx
->cryptd_tfm
->base
);
674 return crypto_ablkcipher_decrypt(cryptd_req
);
676 struct blkcipher_desc desc
;
678 desc
.tfm
= cryptd_ablkcipher_child(ctx
->cryptd_tfm
);
679 desc
.info
= req
->info
;
682 return crypto_blkcipher_crt(desc
.tfm
)->decrypt(
683 &desc
, req
->dst
, req
->src
, req
->nbytes
);
687 static void ablk_exit(struct crypto_tfm
*tfm
)
689 struct async_serpent_ctx
*ctx
= crypto_tfm_ctx(tfm
);
691 cryptd_free_ablkcipher(ctx
->cryptd_tfm
);
694 static void ablk_init_common(struct crypto_tfm
*tfm
,
695 struct cryptd_ablkcipher
*cryptd_tfm
)
697 struct async_serpent_ctx
*ctx
= crypto_tfm_ctx(tfm
);
699 ctx
->cryptd_tfm
= cryptd_tfm
;
700 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct ablkcipher_request
) +
701 crypto_ablkcipher_reqsize(&cryptd_tfm
->base
);
/* Bind the async ECB wrapper to the internal __driver-ecb blkcipher. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-serpent-sse2", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
715 static struct crypto_alg ablk_ecb_alg
= {
716 .cra_name
= "ecb(serpent)",
717 .cra_driver_name
= "ecb-serpent-sse2",
719 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
720 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
721 .cra_ctxsize
= sizeof(struct async_serpent_ctx
),
723 .cra_type
= &crypto_ablkcipher_type
,
724 .cra_module
= THIS_MODULE
,
725 .cra_list
= LIST_HEAD_INIT(ablk_ecb_alg
.cra_list
),
726 .cra_init
= ablk_ecb_init
,
727 .cra_exit
= ablk_exit
,
730 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
731 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
732 .setkey
= ablk_set_key
,
733 .encrypt
= ablk_encrypt
,
734 .decrypt
= ablk_decrypt
,
/* Bind the async CBC wrapper to the internal __driver-cbc blkcipher. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-serpent-sse2", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
750 static struct crypto_alg ablk_cbc_alg
= {
751 .cra_name
= "cbc(serpent)",
752 .cra_driver_name
= "cbc-serpent-sse2",
754 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
755 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
756 .cra_ctxsize
= sizeof(struct async_serpent_ctx
),
758 .cra_type
= &crypto_ablkcipher_type
,
759 .cra_module
= THIS_MODULE
,
760 .cra_list
= LIST_HEAD_INIT(ablk_cbc_alg
.cra_list
),
761 .cra_init
= ablk_cbc_init
,
762 .cra_exit
= ablk_exit
,
765 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
766 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
767 .ivsize
= SERPENT_BLOCK_SIZE
,
768 .setkey
= ablk_set_key
,
769 .encrypt
= __ablk_encrypt
,
770 .decrypt
= ablk_decrypt
,
/* Bind the async CTR wrapper to the internal __driver-ctr blkcipher. */
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-serpent-sse2", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
786 static struct crypto_alg ablk_ctr_alg
= {
787 .cra_name
= "ctr(serpent)",
788 .cra_driver_name
= "ctr-serpent-sse2",
790 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
792 .cra_ctxsize
= sizeof(struct async_serpent_ctx
),
794 .cra_type
= &crypto_ablkcipher_type
,
795 .cra_module
= THIS_MODULE
,
796 .cra_list
= LIST_HEAD_INIT(ablk_ctr_alg
.cra_list
),
797 .cra_init
= ablk_ctr_init
,
798 .cra_exit
= ablk_exit
,
801 .min_keysize
= SERPENT_MIN_KEY_SIZE
,
802 .max_keysize
= SERPENT_MAX_KEY_SIZE
,
803 .ivsize
= SERPENT_BLOCK_SIZE
,
804 .setkey
= ablk_set_key
,
805 .encrypt
= ablk_encrypt
,
806 .decrypt
= ablk_encrypt
,
/* Bind the async LRW wrapper to the internal __driver-lrw blkcipher. */
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-lrw-serpent-sse2", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
825 static struct crypto_alg ablk_lrw_alg
= {
826 .cra_name
= "lrw(serpent)",
827 .cra_driver_name
= "lrw-serpent-sse2",
829 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
830 .cra_blocksize
= SERPENT_BLOCK_SIZE
,
831 .cra_ctxsize
= sizeof(struct async_serpent_ctx
),
833 .cra_type
= &crypto_ablkcipher_type
,
834 .cra_module
= THIS_MODULE
,
835 .cra_list
= LIST_HEAD_INIT(ablk_lrw_alg
.cra_list
),
836 .cra_init
= ablk_lrw_init
,
837 .cra_exit
= ablk_exit
,
840 .min_keysize
= SERPENT_MIN_KEY_SIZE
+
842 .max_keysize
= SERPENT_MAX_KEY_SIZE
+
844 .ivsize
= SERPENT_BLOCK_SIZE
,
845 .setkey
= ablk_set_key
,
846 .encrypt
= ablk_encrypt
,
847 .decrypt
= ablk_decrypt
,
854 static int __init
serpent_sse2_init(void)
859 printk(KERN_INFO
"SSE2 instructions are not detected.\n");
863 err
= crypto_register_alg(&blk_ecb_alg
);
866 err
= crypto_register_alg(&blk_cbc_alg
);
869 err
= crypto_register_alg(&blk_ctr_alg
);
872 err
= crypto_register_alg(&ablk_ecb_alg
);
875 err
= crypto_register_alg(&ablk_cbc_alg
);
878 err
= crypto_register_alg(&ablk_ctr_alg
);
882 err
= crypto_register_alg(&blk_lrw_alg
);
885 err
= crypto_register_alg(&ablk_lrw_alg
);
893 crypto_unregister_alg(&blk_lrw_alg
);
895 crypto_unregister_alg(&ablk_ctr_alg
);
898 crypto_unregister_alg(&ablk_cbc_alg
);
900 crypto_unregister_alg(&ablk_ecb_alg
);
902 crypto_unregister_alg(&blk_ctr_alg
);
904 crypto_unregister_alg(&blk_cbc_alg
);
906 crypto_unregister_alg(&blk_ecb_alg
);
911 static void __exit
serpent_sse2_exit(void)
914 crypto_unregister_alg(&ablk_lrw_alg
);
915 crypto_unregister_alg(&blk_lrw_alg
);
917 crypto_unregister_alg(&ablk_ctr_alg
);
918 crypto_unregister_alg(&ablk_cbc_alg
);
919 crypto_unregister_alg(&ablk_ecb_alg
);
920 crypto_unregister_alg(&blk_ctr_alg
);
921 crypto_unregister_alg(&blk_cbc_alg
);
922 crypto_unregister_alg(&blk_ecb_alg
);
925 module_init(serpent_sse2_init
);
926 module_exit(serpent_sse2_exit
);
928 MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
929 MODULE_LICENSE("GPL");
930 MODULE_ALIAS("serpent");