/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	talitos_ptr->eptr = upper_32_bits(dma_addr);
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset.
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
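
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * has built and dma-mapped a descriptor submits it with a completion
 * callback.  "my_done" and "my_ctx" are hypothetical names.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// inspect error and the DONE/feedback bits in desc->hdr,
 *		// then unmap and free caller resources
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_ctx);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN: channel fifo full; unmap and retry later
 */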

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;

		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
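
/*
 * Worked example of the done-mask layout (derived from the tests above):
 * channel ch reports done status on ISR bit 2*ch, so
 * DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) expands to a tasklet body
 * talitos_done_4ch() that flushes channels 0..3 when bits 0/2/4/6 are set.
 */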

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->chan[ch].tail;
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->chan[ch].tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[tail].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
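
/*
 * Consumer-side note (a sketch, not driver code): after hwrng_register()
 * succeeds, the kernel hw_random core exposes this RNG, and user space can
 * read entropy through the core's character device:
 *
 *	int fd = open("/dev/hwrng", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */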

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_BLOCK_SIZE    64

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
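
/*
 * Worked example of the key layout built above, assuming
 * authenc(hmac(sha1),cbc(aes)) with a 20-byte auth key and a 16-byte AES key:
 *
 *	ctx->key:	[ authkey (20 bytes) | enckey (16 bytes) ]
 *	ctx->keylen:	36, ctx->authkeylen: 20, ctx->enckeylen: 16
 *
 * ipsec_esp() later points desc->ptr[0] at the auth half and desc->ptr[3]
 * at &ctx->key[authkeylen].
 */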

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
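
/*
 * Layout example (my reading of talitos_edesc_alloc(), illustrative): with
 * src_nents = 3, dst_nents = 2 and assoc_nents = 0, link_tbl holds
 * 3 + 2 + 2 = 7 entries followed by authsize bytes of ICV, so the stashed
 * ICV lives at &link_tbl[src_nents + dst_nents + 2 + assoc_nents] -- the
 * same index the ipsec_esp callbacks use.
 */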

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = scatterwalk_sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
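
/*
 * Worked example of the trimming loop above: for three dma segments of
 * 64 bytes each and cryptlen = 100, the fill loop leaves cryptlen at
 * 100 - 192 = -92.  The last entry (64 <= 92) is emptied, raising cryptlen
 * to -28; the middle entry is then shortened by 28 to 36 bytes and tagged
 * DESC_PTR_LNKTBL_RETURN, so exactly 64 + 36 = 100 bytes are described.
 */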

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       0, DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen, 0,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = scatterwalk_sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (iv)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoc) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
	src_nents = (src_nents == 1) ? 0 : src_nents;

	if (dst) {
		if (dst == src) {
			dst_nents = src_nents;
		} else {
			dst_nents = sg_count(dst, cryptlen + authsize,
					     &dst_chained);
			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
		}
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
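
/*
 * Sizing example for the allocation above: src_nents = 3, dst_nents = 2,
 * assoc_nents = 0 and authsize = 12 give
 * dma_len = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 12, all of it
 * kmalloc'd with GFP_DMA and mapped in one dma_map_single() call.
 */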

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
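
/*
 * e.g. with an all-zero ctx->iv and seq = 5, the generated IV begins with
 * the big-endian 64-bit value 5, so consecutive packets cannot share an IV.
 */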

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0].len = 0;
	to_talitos_ptr(&desc->ptr[0], 0);
	desc->ptr[0].j_extent = 0;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
	desc->ptr[1].len = cpu_to_be16(ivsize);
	desc->ptr[1].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, 0, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	desc->ptr[3].len = cpu_to_be16(cryptlen);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
	} else {
		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[3],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = 0;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
			       (edesc->src_nents + 1) *
			       sizeof(struct talitos_ptr));
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  link_tbl_ptr);
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6].len = 0;
	to_talitos_ptr(&desc->ptr[6], 0);
	desc->ptr[6].j_extent = 0;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
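
/*
 * Usage sketch (kernel crypto API consumer of this era, not driver code):
 * once "cbc(aes)" is registered, any kernel user can reach this engine via
 * the generic ablkcipher interface:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req); // -EINPROGRESS when queued
 */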

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	/* When using hashctx-in, must unmap it. */
	if (edesc->desc.ptr[1].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (edesc->desc.ptr[2].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context, 0,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	desc->ptr[3].len = cpu_to_be16(length);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, req_ctx->psrc,
				  edesc->src_nents ? : 1,
				  DMA_TO_DEVICE, edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
	} else {
		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
			dma_sync_single_for_device(ctx->dev,
						   edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[3],
				       sg_dma_address(req_ctx->psrc));
		}
	}

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, 0, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
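
/*
 * Buffering example for the logic above (blocksize = 64): an update of
 * nbytes = 100 arriving with nbuf = 20 gives nbytes_to_hash = 120 and
 * to_hash_later = 120 & 63 = 56, so 64 bytes are hashed now and 56 are
 * stashed in bufnext for the next update/final/finup.
 */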

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
*tfm
, const u8
*key
,
1860 unsigned int keylen
)
1862 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1863 unsigned int blocksize
=
1864 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1865 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
1866 unsigned int keysize
= keylen
;
1867 u8 hash
[SHA512_DIGEST_SIZE
];
1870 if (keylen
<= blocksize
)
1871 memcpy(ctx
->key
, key
, keysize
);
1873 /* Must get the hash of the long key */
1874 ret
= keyhash(tfm
, key
, keylen
, hash
);
1877 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1881 keysize
= digestsize
;
1882 memcpy(ctx
->key
, hash
, digestsize
);
1885 ctx
->keylen
= keysize
;

struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};
1900 static struct talitos_alg_template driver_algs
[] = {
1901 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1902 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1904 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1905 .cra_driver_name
= "authenc-hmac-sha1-cbc-aes-talitos",
1906 .cra_blocksize
= AES_BLOCK_SIZE
,
1907 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1909 .ivsize
= AES_BLOCK_SIZE
,
1910 .maxauthsize
= SHA1_DIGEST_SIZE
,
1913 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
1914 DESC_HDR_SEL0_AESU
|
1915 DESC_HDR_MODE0_AESU_CBC
|
1916 DESC_HDR_SEL1_MDEUA
|
1917 DESC_HDR_MODE1_MDEU_INIT
|
1918 DESC_HDR_MODE1_MDEU_PAD
|
1919 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
1921 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1923 .cra_name
= "authenc(hmac(sha1),cbc(des3_ede))",
1924 .cra_driver_name
= "authenc-hmac-sha1-cbc-3des-talitos",
1925 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1926 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1928 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1929 .maxauthsize
= SHA1_DIGEST_SIZE
,
1932 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
1934 DESC_HDR_MODE0_DEU_CBC
|
1935 DESC_HDR_MODE0_DEU_3DES
|
1936 DESC_HDR_SEL1_MDEUA
|
1937 DESC_HDR_MODE1_MDEU_INIT
|
1938 DESC_HDR_MODE1_MDEU_PAD
|
1939 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
1941 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1943 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
1944 .cra_driver_name
= "authenc-hmac-sha224-cbc-aes-talitos",
1945 .cra_blocksize
= AES_BLOCK_SIZE
,
1946 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1948 .ivsize
= AES_BLOCK_SIZE
,
1949 .maxauthsize
= SHA224_DIGEST_SIZE
,
1952 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
1953 DESC_HDR_SEL0_AESU
|
1954 DESC_HDR_MODE0_AESU_CBC
|
1955 DESC_HDR_SEL1_MDEUA
|
1956 DESC_HDR_MODE1_MDEU_INIT
|
1957 DESC_HDR_MODE1_MDEU_PAD
|
1958 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
1960 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1962 .cra_name
= "authenc(hmac(sha224),cbc(des3_ede))",
1963 .cra_driver_name
= "authenc-hmac-sha224-cbc-3des-talitos",
1964 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1965 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1967 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1968 .maxauthsize
		.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			             CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			             CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUB |
		                     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUB |
		                     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUA |
		                     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUB |
		                     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				             CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		                     DESC_HDR_SEL0_MDEUB |
		                     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
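/*
 * Example (illustrative only, not part of the driver): once registered,
 * the algorithms above are reached through the generic crypto API by
 * cra_name; a minimal sketch for the talitos-backed "sha256" would be:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * The crypto core picks this driver when TALITOS_CRA_PRIORITY wins the
 * priority comparison; requesting "sha256-talitos" by driver name forces
 * this implementation.
 */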
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
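/*
 * Note on the round-robin above: num_channels is validated as a power of
 * two at probe time, so "atomic_inc_return(&priv->last_chan) &
 * (priv->num_channels - 1)" behaves like "counter % num_channels" and
 * cycles 0, 1, ..., num_channels - 1 as tfms are created (e.g. with four
 * channels the mask is 0x3).
 */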
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
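/*
 * crypto_ahash_set_reqsize() tells the ahash core how much per-request
 * context to reserve; the driver's talitos_ahash_req_ctx then lives in
 * the memory returned by ahash_request_ctx(req) for each request.
 */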
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
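/*
 * Illustrative device tree node feeding the masks tested by hw_supports()
 * above and read by talitos_probe() below (example values only; the real
 * masks come from the board dts and the SEC binding documentation):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */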
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
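/*
 * Note on the SHA-224 fallback in talitos_alg_alloc() above: SHA-224 is
 * the SHA-256 compression function with different initial hash values and
 * a truncated digest, so on parts without TALITOS_FTR_SHA224_HWINIT the
 * MDEU is run in SHA-256 mode and ahash_init_sha224_swinit seeds the
 * context with the SHA-224 initial values in software.
 */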
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
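/*
 * IRQ wiring summary: with a single interrupt line, all four channels are
 * serviced by talitos_interrupt_4ch; with two lines, channels 0/2 share
 * priv->irq[0] (talitos_interrupt_ch0_2) and channels 1/3 share
 * priv->irq[1] (talitos_interrupt_ch1_3). The done_task tasklet setup in
 * talitos_probe() below mirrors the same split.
 */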
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	INIT_LIST_HEAD(&priv->alg_list);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	/*
	 * start submit counts negative so a channel reads as full once
	 * chfifo_len - 1 requests are outstanding
	 */
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				  t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
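/*
 * Quick sanity check after boot (illustrative): every algorithm this
 * device registered shows up in /proc/crypto with a "-talitos" driver
 * name, e.g.:
 *
 *	$ grep -B1 talitos /proc/crypto
 *
 * listing entries such as "driver : sha256-talitos" and
 * "driver : cbc-aes-talitos".
 */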
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");