/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED       0x0000
#define CTL_FLAG_USED         0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV      0x0002
#define CTL_FLAG_GEN_REVAES   0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK         0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

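/*
 * Buffer descriptor as handed to the NPE. Everything up to and including
 * __reserved[] appears to be the hardware's view of the descriptor;
 * 'next' exists only so the host can walk and free the chain (see
 * free_buf_chain(), which keeps the virtual and physical links in sync).
 */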
struct buffer_desc {
	u32 phys_next;
	u16 buf_len;
	u16 pkt_len;
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
};

struct crypt_ctl {
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	unsigned src_nents;
	unsigned dst_nents;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	unsigned short assoc_nents;
	unsigned short src_nents;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword = 0xAA010004,
	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
	       "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword = 0x00000005,
	.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
	       "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_32BIT_MASK,
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;

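/*
 * All crypt_ctl descriptors live in one coherent DMA block, so converting
 * between virtual and physical addresses is plain index arithmetic over
 * the 64-byte descriptor stride, e.g.:
 *
 *	crypt_virt2phys(crypt_virt + 5) == crypt_phys + 5 * 64
 *
 * The BUILD_BUG_ON() in setup_crypt_desc() guarantees the stride.
 */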
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* Called under desc_lock with interrupts off, so we must not sleep.
	 * Allocate the full pool including the emergency descriptors used
	 * by get_crypt_desc_emerg(); this also matches the size freed in
	 * release_ixp_crypto().
	 */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

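/*
 * Descriptor allocation uses two disjoint regions of the pool: indices
 * [0, NPE_QLEN) are handed out by get_crypt_desc() for regular crypto
 * requests, while [NPE_QLEN, NPE_QLEN_TOTAL) is an emergency reserve used
 * by get_crypt_desc_emerg() for control operations (HMAC pad hashing,
 * reverse AES key generation), so that key setup cannot be starved by a
 * full request queue.
 */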
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct buffer_desc *buf, u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

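/*
 * When the ICV straddles scatterlist entries, aead_perform() gathers it
 * into a bounce buffer from buffer_pool. On completion of an encryption,
 * the ICV computed by the NPE is copied back from the bounce buffer into
 * its proper place in the scatterlist; the buffer is freed in either
 * direction.
 */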
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

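/*
 * Completion of one NPE operation. The queue manager hands back the
 * descriptor's physical address with status encoded in the low bits;
 * bit 0 set seems to signal an authentication failure (-EBADMSG), and
 * the low two bits are masked off before translating the address back
 * into a descriptor pointer.
 */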
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);
		dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
				DMA_TO_DEVICE);
		dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
		dma_unmap_sg(dev, req->src, req_ctx->src_nents,
				DMA_BIDIRECTIONAL);

		free_buf_chain(req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
		int nents;
		if (req_ctx->dst) {
			nents = req_ctx->dst_nents;
			dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
			free_buf_chain(req_ctx->dst, crypt->dst_buf);
			src_direction = DMA_TO_DEVICE;
		}
		nents = req_ctx->src_nents;
		dma_unmap_sg(dev, req->src, nents, src_direction);
		free_buf_chain(req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(void)
{
	int ret = -ENODEV;

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto err;
	}

	/* buffer_pool will also be used to sometimes store the hmac,
	 * so ensure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
			"ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
			"ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				crypt_virt, crypt_phys);
	}
}

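/*
 * Each ixp_ctx keeps two SA directions (encrypt/decrypt), each owning an
 * NPE_CTX_LEN "crypt info" blob from ctx_pool. setup_cipher() and
 * setup_auth() append the cipher config word, key and hash chaining
 * variables to this blob; npe_ctx_idx tracks how much of it has been
 * filled in and is later sent to the NPE as crypt->init_len.
 */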
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

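/*
 * HMAC precomputation (RFC 2104): build the key XOR ipad and key XOR opad
 * blocks and let the NPE hash each one (NPE_OP_HASH_GEN_ICV), storing the
 * resulting inner/outer chaining variables at 'target' inside the
 * direction's crypt info. Per-request hashing then starts from these
 * saved states instead of re-hashing the padded key every time.
 */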
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

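/*
 * AES decryption needs the reverse (decryption) key schedule. The NPE
 * derives it by running a key-generation operation (NPE_OP_ENC_GEN_KEY)
 * over one AES block, depositing the reverse key at icv_rev_aes; the
 * completion handler (CTL_FLAG_GEN_REVAES in one_packet()) then clears
 * the CIPH_ENCR bit in the config word so the context decrypts from
 * then on.
 */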
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		/* MOD_AES1xx already carries the matching KEYLEN_xxx bits */
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;
	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

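/*
 * Build a chain of buffer descriptors covering nbytes of an already
 * DMA-mapped scatterlist. Consecutive entries that turn out to be
 * physically contiguous are merged into the previous descriptor instead
 * of allocating a new one. Returns the last descriptor in the chain, or
 * NULL on allocation failure (the caller unwinds the partial chain with
 * free_buf_chain()).
 */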
static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
		unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
{
	int nents = 0;

	while (nbytes > 0) {
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		unsigned len = min(nbytes, sg_dma_len(sg));

		nents++;
		nbytes -= len;
		if (!buf->phys_addr) {
			buf->phys_addr = sg_dma_address(sg);
			buf->buf_len = len;
			buf->next = NULL;
			buf->phys_next = 0;
			goto next;
		}
		/* Two consecutive chunks on one page may be handled by the
		 * old buffer descriptor, increased by the length of the
		 * new one
		 */
		if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
			buf->buf_len += len;
			goto next;
		}
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf)
			return NULL;
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;

		buf = next_buf;
		buf->next = NULL;
		buf->phys_next = 0;
		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
next:
		if (nbytes > 0) {
			sg = sg_next(sg);
		}
	}
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes, nents;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		nents = count_sg(req->dst, nbytes);
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(nents != 1);
		req_ctx->dst_nents = nents;
		dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
		req_ctx->dst = dma_pool_alloc(buffer_pool, flags, &crypt->dst_buf);
		if (!req_ctx->dst)
			goto unmap_sg_dest;
		req_ctx->dst->phys_addr = 0;
		if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
	} else {
		req_ctx->dst = NULL;
		req_ctx->dst_nents = 0;
	}
	nents = count_sg(req->src, nbytes);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, src_direction);

	req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->src)
		goto unmap_sg_src;
	req_ctx->src->phys_addr = 0;
	if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
		goto free_buf_src;

	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(req_ctx->src, crypt->src_buf);
unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(req_ctx->dst, crypt->dst_buf);
unmap_sg_dest:
		/* undo the destination mapping, not the source one */
		dma_unmap_sg(dev, req->dst, req_ctx->dst_nents,
				DMA_FROM_DEVICE);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

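/*
 * Return true if the nbytes starting at offset 'start' do not fit within
 * a single scatterlist entry, i.e. the ICV is scattered and the NPE
 * cannot read or write it in place; aead_perform() then falls back to a
 * bounce buffer.
 */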
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}

static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen, nents;
	struct buffer_desc *buf;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->buffer)
		goto out;
	req_ctx->buffer->phys_addr = 0;
	/* ASSOC data */
	nents = count_sg(req->assoc, req->assoclen);
	req_ctx->assoc_nents = nents;
	dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
	buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer, flags);
	if (!buf)
		goto unmap_sg_assoc;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
	if (!buf)
		goto unmap_sg_iv;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The hmac bytes are scattered across sg entries,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto unmap_sg_iv;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	nents = count_sg(req->src, cryptlen + authsize);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
	if (!buf)
		goto unmap_sg_src;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}
	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;
unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
unmap_sg_iv:
	dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
unmap_sg_assoc:
	dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
	free_buf_chain(req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	/* reject keys that would overflow the fixed-size key buffers */
	if (ctx->enckey_len > MAX_KEYLEN || ctx->authkey_len > MAX_KEYLEN)
		goto badkey;
	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.cra_name = "cbc(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.cra_name = "ecb(des)",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "cbc(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name = "ecb(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.cra_name = "ctr(aes)",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name = "rfc3686(ctr(aes))",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			.setkey = ablk_rfc3686_setkey,
			.encrypt = ablk_rfc3686_crypt,
			.decrypt = ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(des))",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_u = { .aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
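/*
 * Illustrative sketch (not part of the driver): once registered, the
 * "-ixp4xx" implementations are picked up through the normal crypto API
 * by priority. A hypothetical user of the cbc(aes) instance would look
 * roughly like this (error handling elided; my_complete, my_ctx, src_sg,
 * dst_sg and iv are the caller's):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);   (returns -EINPROGRESS)
 *
 * The request is completed asynchronously from crypto_done_tasklet via
 * req->base.complete().
 */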

#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");