/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	talitos_ptr->eptr = upper_32_bits(dma_addr);
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
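
/*
 * Illustrative usage sketch (not part of the driver): a caller hands a
 * dma-mapped descriptor to a channel and is completed asynchronously.
 * The names my_done()/my_req below are hypothetical; only talitos_submit()
 * and its -EINPROGRESS convention come from this file.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *req = context;
 *
 *		// error carries channel status; desc->hdr holds the done and
 *		// feedback bits for device processing status
 *		my_req_complete(req, error);
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (err != -EINPROGRESS) {
 *		// channel fifo full (-EAGAIN): unmap and retry or back off
 *		my_req_cleanup(my_req);
 *	}
 */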
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;

		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->chan[ch].tail;
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->chan[ch].tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[tail].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	} else {							\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
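
/*
 * Wiring sketch (illustration only, not shown in this section): a probe
 * routine would pair each generated interrupt handler with its done
 * tasklet.  priv->irq[0] and the surrounding setup are assumptions made
 * here purely for illustration, e.g. on a single-IRQ part:
 *
 *	tasklet_init(&priv->done_task[0], talitos_done_4ch,
 *		     (unsigned long)dev);
 *	err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
 *			  dev_driver_string(dev), dev);
 *
 * Dual-IRQ parts would instead request talitos_interrupt_ch0_2/ch1_3 and
 * initialize done_task[0]/done_task[1] with talitos_done_ch0_2/ch1_3.
 */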
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}
static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_BLOCK_SIZE    64

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
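
/*
 * Buffering sketch (illustration only), mirroring ahash_process_req()
 * later in this file: updates accumulate up to one block in @buf, whole
 * blocks are hashed, and any remainder is parked in @bufnext via
 * @to_hash_later for the next update/final.  In block-sized arithmetic:
 *
 *	nbytes_to_hash = nbytes + req_ctx->nbuf;
 *	to_hash_later  = nbytes_to_hash & (blocksize - 1);
 *	if (!req_ctx->last && !to_hash_later) {
 *		// keep one full block buffered so final has data to pad
 *		nbytes_to_hash -= blocksize;
 *		to_hash_later = blocksize;
 *	}
 */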
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/**
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
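
/*
 * Allocation sketch (illustration only), mirroring the arithmetic used by
 * talitos_edesc_alloc() later in this file: when any scatterlist has more
 * than one segment, the link tables and ICV scratch space are appended
 * directly after the struct so one allocation (and one dma mapping)
 * covers them all:
 *
 *	dma_len   = (src_nents + dst_nents + 2 + assoc_nents) *
 *		    sizeof(struct talitos_ptr) + authsize;
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len +
 *		    (icv_stashing ? authsize : 0);
 */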
747 static int talitos_map_sg(struct device
*dev
, struct scatterlist
*sg
,
748 unsigned int nents
, enum dma_data_direction dir
,
751 if (unlikely(chained
))
753 dma_map_sg(dev
, sg
, 1, dir
);
754 sg
= scatterwalk_sg_next(sg
);
757 dma_map_sg(dev
, sg
, nents
, dir
);
761 static void talitos_unmap_sg_chain(struct device
*dev
, struct scatterlist
*sg
,
762 enum dma_data_direction dir
)
765 dma_unmap_sg(dev
, sg
, 1, dir
);
766 sg
= scatterwalk_sg_next(sg
);
770 static void talitos_sg_unmap(struct device
*dev
,
771 struct talitos_edesc
*edesc
,
772 struct scatterlist
*src
,
773 struct scatterlist
*dst
)
775 unsigned int src_nents
= edesc
->src_nents
? : 1;
776 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
779 if (edesc
->src_chained
)
780 talitos_unmap_sg_chain(dev
, src
, DMA_TO_DEVICE
);
782 dma_unmap_sg(dev
, src
, src_nents
, DMA_TO_DEVICE
);
785 if (edesc
->dst_chained
)
786 talitos_unmap_sg_chain(dev
, dst
,
789 dma_unmap_sg(dev
, dst
, dst_nents
,
793 if (edesc
->src_chained
)
794 talitos_unmap_sg_chain(dev
, src
, DMA_BIDIRECTIONAL
);
796 dma_unmap_sg(dev
, src
, src_nents
, DMA_BIDIRECTIONAL
);
799 static void ipsec_esp_unmap(struct device
*dev
,
800 struct talitos_edesc
*edesc
,
801 struct aead_request
*areq
)
803 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[6], DMA_FROM_DEVICE
);
804 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[3], DMA_TO_DEVICE
);
805 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
806 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[0], DMA_TO_DEVICE
);
808 if (edesc
->assoc_chained
)
809 talitos_unmap_sg_chain(dev
, areq
->assoc
, DMA_TO_DEVICE
);
811 /* assoc_nents counts also for IV in non-contiguous cases */
812 dma_unmap_sg(dev
, areq
->assoc
,
813 edesc
->assoc_nents
? edesc
->assoc_nents
- 1 : 1,
816 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
819 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
824 * ipsec_esp descriptor callbacks
826 static void ipsec_esp_encrypt_done(struct device
*dev
,
827 struct talitos_desc
*desc
, void *context
,
830 struct aead_request
*areq
= context
;
831 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
832 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
833 struct talitos_edesc
*edesc
;
834 struct scatterlist
*sg
;
837 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
839 ipsec_esp_unmap(dev
, edesc
, areq
);
841 /* copy the generated ICV to dst */
842 if (edesc
->dst_nents
) {
843 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
844 edesc
->dst_nents
+ 2 +
846 sg
= sg_last(areq
->dst
, edesc
->dst_nents
);
847 memcpy((char *)sg_virt(sg
) + sg
->length
- ctx
->authsize
,
848 icvdata
, ctx
->authsize
);
853 aead_request_complete(areq
, err
);
856 static void ipsec_esp_decrypt_swauth_done(struct device
*dev
,
857 struct talitos_desc
*desc
,
858 void *context
, int err
)
860 struct aead_request
*req
= context
;
861 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
862 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
863 struct talitos_edesc
*edesc
;
864 struct scatterlist
*sg
;
867 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
869 ipsec_esp_unmap(dev
, edesc
, req
);
874 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
875 edesc
->dst_nents
+ 2 +
878 icvdata
= &edesc
->link_tbl
[0];
880 sg
= sg_last(req
->dst
, edesc
->dst_nents
? : 1);
881 err
= memcmp(icvdata
, (char *)sg_virt(sg
) + sg
->length
-
882 ctx
->authsize
, ctx
->authsize
) ? -EBADMSG
: 0;
887 aead_request_complete(req
, err
);
890 static void ipsec_esp_decrypt_hwauth_done(struct device
*dev
,
891 struct talitos_desc
*desc
,
892 void *context
, int err
)
894 struct aead_request
*req
= context
;
895 struct talitos_edesc
*edesc
;
897 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
899 ipsec_esp_unmap(dev
, edesc
, req
);
901 /* check ICV auth status */
902 if (!err
&& ((desc
->hdr_lo
& DESC_HDR_LO_ICCR1_MASK
) !=
903 DESC_HDR_LO_ICCR1_PASS
))
908 aead_request_complete(req
, err
);
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
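
/*
 * Worked example (illustration only): for a three-entry scatterlist of
 * dma lengths 64, 64 and 64 with cryptlen = 150, the first pass emits
 * three 64-byte entries and leaves cryptlen at -42.  The fix-up pass then
 * shortens the last entry by 42 (64 + (-42) = 22) and tags it, so the
 * table describes exactly 150 bytes:
 *
 *	entry 0: len = 64
 *	entry 1: len = 64
 *	entry 2: len = 22, j_extent = DESC_PTR_LNKTBL_RETURN
 */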
947 * fill in and submit ipsec_esp descriptor
949 static int ipsec_esp(struct talitos_edesc
*edesc
, struct aead_request
*areq
,
950 u64 seq
, void (*callback
) (struct device
*dev
,
951 struct talitos_desc
*desc
,
952 void *context
, int error
))
954 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
955 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
956 struct device
*dev
= ctx
->dev
;
957 struct talitos_desc
*desc
= &edesc
->desc
;
958 unsigned int cryptlen
= areq
->cryptlen
;
959 unsigned int authsize
= ctx
->authsize
;
960 unsigned int ivsize
= crypto_aead_ivsize(aead
);
965 map_single_talitos_ptr(dev
, &desc
->ptr
[0], ctx
->authkeylen
, &ctx
->key
,
969 desc
->ptr
[1].len
= cpu_to_be16(areq
->assoclen
+ ivsize
);
970 if (edesc
->assoc_nents
) {
971 int tbl_off
= edesc
->src_nents
+ edesc
->dst_nents
+ 2;
972 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
974 to_talitos_ptr(&desc
->ptr
[1], edesc
->dma_link_tbl
+ tbl_off
*
975 sizeof(struct talitos_ptr
));
976 desc
->ptr
[1].j_extent
= DESC_PTR_LNKTBL_JUMP
;
978 /* assoc_nents - 1 entries for assoc, 1 for IV */
979 sg_count
= sg_to_link_tbl(areq
->assoc
, edesc
->assoc_nents
- 1,
980 areq
->assoclen
, tbl_ptr
);
982 /* add IV to link table */
983 tbl_ptr
+= sg_count
- 1;
984 tbl_ptr
->j_extent
= 0;
986 to_talitos_ptr(tbl_ptr
, edesc
->iv_dma
);
987 tbl_ptr
->len
= cpu_to_be16(ivsize
);
988 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
990 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
991 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
993 to_talitos_ptr(&desc
->ptr
[1], sg_dma_address(areq
->assoc
));
994 desc
->ptr
[1].j_extent
= 0;
998 to_talitos_ptr(&desc
->ptr
[2], edesc
->iv_dma
);
999 desc
->ptr
[2].len
= cpu_to_be16(ivsize
);
1000 desc
->ptr
[2].j_extent
= 0;
1001 /* Sync needed for the aead_givencrypt case */
1002 dma_sync_single_for_device(dev
, edesc
->iv_dma
, ivsize
, DMA_TO_DEVICE
);
1005 map_single_talitos_ptr(dev
, &desc
->ptr
[3], ctx
->enckeylen
,
1006 (char *)&ctx
->key
+ ctx
->authkeylen
, 0,
1011 * map and adjust cipher len to aead request cryptlen.
1012 * extent is bytes of HMAC postpended to ciphertext,
1013 * typically 12 for ipsec
1015 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1016 desc
->ptr
[4].j_extent
= authsize
;
1018 sg_count
= talitos_map_sg(dev
, areq
->src
, edesc
->src_nents
? : 1,
1019 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1021 edesc
->src_chained
);
1023 if (sg_count
== 1) {
1024 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->src
));
1026 sg_link_tbl_len
= cryptlen
;
1028 if (edesc
->desc
.hdr
& DESC_HDR_MODE1_MDEU_CICV
)
1029 sg_link_tbl_len
= cryptlen
+ authsize
;
1031 sg_count
= sg_to_link_tbl(areq
->src
, sg_count
, sg_link_tbl_len
,
1032 &edesc
->link_tbl
[0]);
1034 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1035 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
);
1036 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1040 /* Only one segment now, so no link tbl needed */
1041 to_talitos_ptr(&desc
->ptr
[4],
1042 sg_dma_address(areq
->src
));
1047 desc
->ptr
[5].len
= cpu_to_be16(cryptlen
);
1048 desc
->ptr
[5].j_extent
= authsize
;
1050 if (areq
->src
!= areq
->dst
)
1051 sg_count
= talitos_map_sg(dev
, areq
->dst
,
1052 edesc
->dst_nents
? : 1,
1053 DMA_FROM_DEVICE
, edesc
->dst_chained
);
1055 if (sg_count
== 1) {
1056 to_talitos_ptr(&desc
->ptr
[5], sg_dma_address(areq
->dst
));
1058 int tbl_off
= edesc
->src_nents
+ 1;
1059 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1061 to_talitos_ptr(&desc
->ptr
[5], edesc
->dma_link_tbl
+
1062 tbl_off
* sizeof(struct talitos_ptr
));
1063 sg_count
= sg_to_link_tbl(areq
->dst
, sg_count
, cryptlen
,
1066 /* Add an entry to the link table for ICV data */
1067 tbl_ptr
+= sg_count
- 1;
1068 tbl_ptr
->j_extent
= 0;
1070 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1071 tbl_ptr
->len
= cpu_to_be16(authsize
);
1073 /* icv data follows link tables */
1074 to_talitos_ptr(tbl_ptr
, edesc
->dma_link_tbl
+
1075 (tbl_off
+ edesc
->dst_nents
+ 1 +
1076 edesc
->assoc_nents
) *
1077 sizeof(struct talitos_ptr
));
1078 desc
->ptr
[5].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1079 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1080 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1084 map_single_talitos_ptr(dev
, &desc
->ptr
[6], ivsize
, ctx
->iv
, 0,
1087 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1088 if (ret
!= -EINPROGRESS
) {
1089 ipsec_esp_unmap(dev
, edesc
, areq
);
1096 * derive number of elements in scatterlist
1098 static int sg_count(struct scatterlist
*sg_list
, int nbytes
, bool *chained
)
1100 struct scatterlist
*sg
= sg_list
;
1104 while (nbytes
> 0) {
1106 nbytes
-= sg
->length
;
1107 if (!sg_is_last(sg
) && (sg
+ 1)->length
== 0)
1109 sg
= scatterwalk_sg_next(sg
);
1116 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1118 * @nents: Number of SG entries
1119 * @buf: Where to copy to
1120 * @buflen: The number of bytes to copy
1121 * @skip: The number of bytes to skip before copying.
1122 * Note: skip + buflen should equal SG total size.
1124 * Returns the number of copied bytes.
1127 static size_t sg_copy_end_to_buffer(struct scatterlist
*sgl
, unsigned int nents
,
1128 void *buf
, size_t buflen
, unsigned int skip
)
1130 unsigned int offset
= 0;
1131 unsigned int boffset
= 0;
1132 struct sg_mapping_iter miter
;
1133 unsigned long flags
;
1134 unsigned int sg_flags
= SG_MITER_ATOMIC
;
1135 size_t total_buffer
= buflen
+ skip
;
1137 sg_flags
|= SG_MITER_FROM_SG
;
1139 sg_miter_start(&miter
, sgl
, nents
, sg_flags
);
1141 local_irq_save(flags
);
1143 while (sg_miter_next(&miter
) && offset
< total_buffer
) {
1145 unsigned int ignore
;
1147 if ((offset
+ miter
.length
) > skip
) {
1148 if (offset
< skip
) {
1149 /* Copy part of this segment */
1150 ignore
= skip
- offset
;
1151 len
= miter
.length
- ignore
;
1152 if (boffset
+ len
> buflen
)
1153 len
= buflen
- boffset
;
1154 memcpy(buf
+ boffset
, miter
.addr
+ ignore
, len
);
1156 /* Copy all of this segment (up to buflen) */
1158 if (boffset
+ len
> buflen
)
1159 len
= buflen
- boffset
;
1160 memcpy(buf
+ boffset
, miter
.addr
, len
);
1164 offset
+= miter
.length
;
1167 sg_miter_stop(&miter
);
1169 local_irq_restore(flags
);
1174 * allocate and map the extended descriptor
1176 static struct talitos_edesc
*talitos_edesc_alloc(struct device
*dev
,
1177 struct scatterlist
*assoc
,
1178 struct scatterlist
*src
,
1179 struct scatterlist
*dst
,
1181 unsigned int assoclen
,
1182 unsigned int cryptlen
,
1183 unsigned int authsize
,
1184 unsigned int ivsize
,
1188 struct talitos_edesc
*edesc
;
1189 int assoc_nents
= 0, src_nents
, dst_nents
, alloc_len
, dma_len
;
1190 bool assoc_chained
= false, src_chained
= false, dst_chained
= false;
1191 dma_addr_t iv_dma
= 0;
1192 gfp_t flags
= cryptoflags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1195 if (cryptlen
+ authsize
> TALITOS_MAX_DATA_LEN
) {
1196 dev_err(dev
, "length exceeds h/w max limit\n");
1197 return ERR_PTR(-EINVAL
);
1201 iv_dma
= dma_map_single(dev
, iv
, ivsize
, DMA_TO_DEVICE
);
1205 * Currently it is assumed that iv is provided whenever assoc
1210 assoc_nents
= sg_count(assoc
, assoclen
, &assoc_chained
);
1211 talitos_map_sg(dev
, assoc
, assoc_nents
, DMA_TO_DEVICE
,
1213 assoc_nents
= (assoc_nents
== 1) ? 0 : assoc_nents
;
1215 if (assoc_nents
|| sg_dma_address(assoc
) + assoclen
!= iv_dma
)
1216 assoc_nents
= assoc_nents
? assoc_nents
+ 1 : 2;
1219 src_nents
= sg_count(src
, cryptlen
+ authsize
, &src_chained
);
1220 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1226 dst_nents
= src_nents
;
1228 dst_nents
= sg_count(dst
, cryptlen
+ authsize
,
1230 dst_nents
= (dst_nents
== 1) ? 0 : dst_nents
;
1235 * allocate space for base edesc plus the link tables,
1236 * allowing for two separate entries for ICV and generated ICV (+ 2),
1237 * and the ICV data itself
1239 alloc_len
= sizeof(struct talitos_edesc
);
1240 if (assoc_nents
|| src_nents
|| dst_nents
) {
1241 dma_len
= (src_nents
+ dst_nents
+ 2 + assoc_nents
) *
1242 sizeof(struct talitos_ptr
) + authsize
;
1243 alloc_len
+= dma_len
;
1246 alloc_len
+= icv_stashing
? authsize
: 0;
1249 edesc
= kmalloc(alloc_len
, GFP_DMA
| flags
);
1251 talitos_unmap_sg_chain(dev
, assoc
, DMA_TO_DEVICE
);
1253 dma_unmap_single(dev
, iv_dma
, ivsize
, DMA_TO_DEVICE
);
1254 dev_err(dev
, "could not allocate edescriptor\n");
1255 return ERR_PTR(-ENOMEM
);
1258 edesc
->assoc_nents
= assoc_nents
;
1259 edesc
->src_nents
= src_nents
;
1260 edesc
->dst_nents
= dst_nents
;
1261 edesc
->assoc_chained
= assoc_chained
;
1262 edesc
->src_chained
= src_chained
;
1263 edesc
->dst_chained
= dst_chained
;
1264 edesc
->iv_dma
= iv_dma
;
1265 edesc
->dma_len
= dma_len
;
1267 edesc
->dma_link_tbl
= dma_map_single(dev
, &edesc
->link_tbl
[0],
1274 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1277 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1278 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1279 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1281 return talitos_edesc_alloc(ctx
->dev
, areq
->assoc
, areq
->src
, areq
->dst
,
1282 iv
, areq
->assoclen
, areq
->cryptlen
,
1283 ctx
->authsize
, ivsize
, icv_stashing
,
1287 static int aead_encrypt(struct aead_request
*req
)
1289 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1290 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1291 struct talitos_edesc
*edesc
;
1293 /* allocate extended descriptor */
1294 edesc
= aead_edesc_alloc(req
, req
->iv
, 0);
1296 return PTR_ERR(edesc
);
1299 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1301 return ipsec_esp(edesc
, req
, 0, ipsec_esp_encrypt_done
);
1304 static int aead_decrypt(struct aead_request
*req
)
1306 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1307 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1308 unsigned int authsize
= ctx
->authsize
;
1309 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1310 struct talitos_edesc
*edesc
;
1311 struct scatterlist
*sg
;
1314 req
->cryptlen
-= authsize
;
1316 /* allocate extended descriptor */
1317 edesc
= aead_edesc_alloc(req
, req
->iv
, 1);
1319 return PTR_ERR(edesc
);
1321 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1322 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1323 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1325 /* decrypt and check the ICV */
1326 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1327 DESC_HDR_DIR_INBOUND
|
1328 DESC_HDR_MODE1_MDEU_CICV
;
1330 /* reset integrity check result bits */
1331 edesc
->desc
.hdr_lo
= 0;
1333 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_hwauth_done
);
1336 /* Have to check the ICV with software */
1337 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1339 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1341 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
1342 edesc
->dst_nents
+ 2 +
1343 edesc
->assoc_nents
];
1345 icvdata
= &edesc
->link_tbl
[0];
1347 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1349 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- ctx
->authsize
,
1352 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_swauth_done
);
1355 static int aead_givencrypt(struct aead_givcrypt_request
*req
)
1357 struct aead_request
*areq
= &req
->areq
;
1358 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1359 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1360 struct talitos_edesc
*edesc
;
1362 /* allocate extended descriptor */
1363 edesc
= aead_edesc_alloc(areq
, req
->giv
, 0);
1365 return PTR_ERR(edesc
);
1368 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1370 memcpy(req
->giv
, ctx
->iv
, crypto_aead_ivsize(authenc
));
1371 /* avoid consecutive packets going out with same IV */
1372 *(__be64
*)req
->giv
^= cpu_to_be64(req
->seq
);
1374 return ipsec_esp(edesc
, areq
, req
->seq
, ipsec_esp_encrypt_done
);
1377 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1378 const u8
*key
, unsigned int keylen
)
1380 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1382 memcpy(&ctx
->key
, key
, keylen
);
1383 ctx
->keylen
= keylen
;
1388 static void common_nonsnoop_unmap(struct device
*dev
,
1389 struct talitos_edesc
*edesc
,
1390 struct ablkcipher_request
*areq
)
1392 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1393 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1394 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1396 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
1399 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1403 static void ablkcipher_done(struct device
*dev
,
1404 struct talitos_desc
*desc
, void *context
,
1407 struct ablkcipher_request
*areq
= context
;
1408 struct talitos_edesc
*edesc
;
1410 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1412 common_nonsnoop_unmap(dev
, edesc
, areq
);
1416 areq
->base
.complete(&areq
->base
, err
);
1419 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1420 struct ablkcipher_request
*areq
,
1421 void (*callback
) (struct device
*dev
,
1422 struct talitos_desc
*desc
,
1423 void *context
, int error
))
1425 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1426 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1427 struct device
*dev
= ctx
->dev
;
1428 struct talitos_desc
*desc
= &edesc
->desc
;
1429 unsigned int cryptlen
= areq
->nbytes
;
1430 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1433 /* first DWORD empty */
1434 desc
->ptr
[0].len
= 0;
1435 to_talitos_ptr(&desc
->ptr
[0], 0);
1436 desc
->ptr
[0].j_extent
= 0;
1439 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
);
1440 desc
->ptr
[1].len
= cpu_to_be16(ivsize
);
1441 desc
->ptr
[1].j_extent
= 0;
1444 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1445 (char *)&ctx
->key
, 0, DMA_TO_DEVICE
);
1450 desc
->ptr
[3].len
= cpu_to_be16(cryptlen
);
1451 desc
->ptr
[3].j_extent
= 0;
1453 sg_count
= talitos_map_sg(dev
, areq
->src
, edesc
->src_nents
? : 1,
1454 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1456 edesc
->src_chained
);
1458 if (sg_count
== 1) {
1459 to_talitos_ptr(&desc
->ptr
[3], sg_dma_address(areq
->src
));
1461 sg_count
= sg_to_link_tbl(areq
->src
, sg_count
, cryptlen
,
1462 &edesc
->link_tbl
[0]);
1464 to_talitos_ptr(&desc
->ptr
[3], edesc
->dma_link_tbl
);
1465 desc
->ptr
[3].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1466 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1470 /* Only one segment now, so no link tbl needed */
1471 to_talitos_ptr(&desc
->ptr
[3],
1472 sg_dma_address(areq
->src
));
1477 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1478 desc
->ptr
[4].j_extent
= 0;
1480 if (areq
->src
!= areq
->dst
)
1481 sg_count
= talitos_map_sg(dev
, areq
->dst
,
1482 edesc
->dst_nents
? : 1,
1483 DMA_FROM_DEVICE
, edesc
->dst_chained
);
1485 if (sg_count
== 1) {
1486 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->dst
));
1488 struct talitos_ptr
*link_tbl_ptr
=
1489 &edesc
->link_tbl
[edesc
->src_nents
+ 1];
1491 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
+
1492 (edesc
->src_nents
+ 1) *
1493 sizeof(struct talitos_ptr
));
1494 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1495 sg_count
= sg_to_link_tbl(areq
->dst
, sg_count
, cryptlen
,
1497 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1498 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1502 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
, 0,
1505 /* last DWORD empty */
1506 desc
->ptr
[6].len
= 0;
1507 to_talitos_ptr(&desc
->ptr
[6], 0);
1508 desc
->ptr
[6].j_extent
= 0;
1510 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1511 if (ret
!= -EINPROGRESS
) {
1512 common_nonsnoop_unmap(dev
, edesc
, areq
);
1518 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1521 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1522 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1523 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1525 return talitos_edesc_alloc(ctx
->dev
, NULL
, areq
->src
, areq
->dst
,
1526 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1530 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1532 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1533 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1534 struct talitos_edesc
*edesc
;
1536 /* allocate extended descriptor */
1537 edesc
= ablkcipher_edesc_alloc(areq
);
1539 return PTR_ERR(edesc
);
1542 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1544 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1547 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1549 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1550 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1551 struct talitos_edesc
*edesc
;
1553 /* allocate extended descriptor */
1554 edesc
= ablkcipher_edesc_alloc(areq
);
1556 return PTR_ERR(edesc
);
1558 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1560 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1563 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1564 struct talitos_edesc
*edesc
,
1565 struct ahash_request
*areq
)
1567 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1569 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1571 /* When using hashctx-in, must unmap it. */
1572 if (edesc
->desc
.ptr
[1].len
)
1573 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1576 if (edesc
->desc
.ptr
[2].len
)
1577 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1580 talitos_sg_unmap(dev
, edesc
, req_ctx
->psrc
, NULL
);
1583 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1588 static void ahash_done(struct device
*dev
,
1589 struct talitos_desc
*desc
, void *context
,
1592 struct ahash_request
*areq
= context
;
1593 struct talitos_edesc
*edesc
=
1594 container_of(desc
, struct talitos_edesc
, desc
);
1595 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1597 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1598 /* Position any partial block for next update/final/finup */
1599 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1600 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1602 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1606 areq
->base
.complete(&areq
->base
, err
);
1609 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1610 struct ahash_request
*areq
, unsigned int length
,
1611 void (*callback
) (struct device
*dev
,
1612 struct talitos_desc
*desc
,
1613 void *context
, int error
))
1615 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1616 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1617 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1618 struct device
*dev
= ctx
->dev
;
1619 struct talitos_desc
*desc
= &edesc
->desc
;
1622 /* first DWORD empty */
1623 desc
->ptr
[0] = zero_entry
;
1625 /* hash context in */
1626 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1627 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1628 req_ctx
->hw_context_size
,
1629 (char *)req_ctx
->hw_context
, 0,
1631 req_ctx
->swinit
= 0;
1633 desc
->ptr
[1] = zero_entry
;
1634 /* Indicate next op is not the first. */
1640 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1641 (char *)&ctx
->key
, 0, DMA_TO_DEVICE
);
1643 desc
->ptr
[2] = zero_entry
;
1648 desc
->ptr
[3].len
= cpu_to_be16(length
);
1649 desc
->ptr
[3].j_extent
= 0;
1651 sg_count
= talitos_map_sg(dev
, req_ctx
->psrc
,
1652 edesc
->src_nents
? : 1,
1653 DMA_TO_DEVICE
, edesc
->src_chained
);
1655 if (sg_count
== 1) {
1656 to_talitos_ptr(&desc
->ptr
[3], sg_dma_address(req_ctx
->psrc
));
1658 sg_count
= sg_to_link_tbl(req_ctx
->psrc
, sg_count
, length
,
1659 &edesc
->link_tbl
[0]);
1661 desc
->ptr
[3].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1662 to_talitos_ptr(&desc
->ptr
[3], edesc
->dma_link_tbl
);
1663 dma_sync_single_for_device(ctx
->dev
,
1664 edesc
->dma_link_tbl
,
1668 /* Only one segment now, so no link tbl needed */
1669 to_talitos_ptr(&desc
->ptr
[3],
1670 sg_dma_address(req_ctx
->psrc
));
1674 /* fifth DWORD empty */
1675 desc
->ptr
[4] = zero_entry
;
1677 /* hash/HMAC out -or- hash context out */
1679 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1680 crypto_ahash_digestsize(tfm
),
1681 areq
->result
, 0, DMA_FROM_DEVICE
);
1683 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1684 req_ctx
->hw_context_size
,
1685 req_ctx
->hw_context
, 0, DMA_FROM_DEVICE
);
1687 /* last DWORD empty */
1688 desc
->ptr
[6] = zero_entry
;
1690 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1691 if (ret
!= -EINPROGRESS
) {
1692 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1698 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1699 unsigned int nbytes
)
1701 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1702 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1703 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1705 return talitos_edesc_alloc(ctx
->dev
, NULL
, req_ctx
->psrc
, NULL
, NULL
, 0,
1706 nbytes
, 0, 0, 0, areq
->base
.flags
);
1709 static int ahash_init(struct ahash_request
*areq
)
1711 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1712 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1714 /* Initialize the context */
1716 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1717 req_ctx
->swinit
= 0; /* assume h/w init of context */
1718 req_ctx
->hw_context_size
=
1719 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1720 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1721 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1727 * on h/w without explicit sha224 support, we initialize h/w context
1728 * manually with sha224 constants, and tell it to run sha256.
1730 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1732 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1735 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1737 req_ctx
->hw_context
[0] = SHA224_H0
;
1738 req_ctx
->hw_context
[1] = SHA224_H1
;
1739 req_ctx
->hw_context
[2] = SHA224_H2
;
1740 req_ctx
->hw_context
[3] = SHA224_H3
;
1741 req_ctx
->hw_context
[4] = SHA224_H4
;
1742 req_ctx
->hw_context
[5] = SHA224_H5
;
1743 req_ctx
->hw_context
[6] = SHA224_H6
;
1744 req_ctx
->hw_context
[7] = SHA224_H7
;
1746 /* init 64-bit count */
1747 req_ctx
->hw_context
[8] = 0;
1748 req_ctx
->hw_context
[9] = 0;
1753 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1755 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1756 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1757 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1758 struct talitos_edesc
*edesc
;
1759 unsigned int blocksize
=
1760 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1761 unsigned int nbytes_to_hash
;
1762 unsigned int to_hash_later
;
1766 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1767 /* Buffer up to one whole block */
1768 sg_copy_to_buffer(areq
->src
,
1769 sg_count(areq
->src
, nbytes
, &chained
),
1770 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1771 req_ctx
->nbuf
+= nbytes
;
1775 /* At least (blocksize + 1) bytes are available to hash */
1776 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1777 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1781 else if (to_hash_later
)
1782 /* There is a partial block. Hash the full block(s) now */
1783 nbytes_to_hash
-= to_hash_later
;
1785 /* Keep one block buffered */
1786 nbytes_to_hash
-= blocksize
;
1787 to_hash_later
= blocksize
;
1790 /* Chain in any previously buffered data */
1791 if (req_ctx
->nbuf
) {
1792 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1793 sg_init_table(req_ctx
->bufsl
, nsg
);
1794 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1796 scatterwalk_sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1797 req_ctx
->psrc
= req_ctx
->bufsl
;
1799 req_ctx
->psrc
= areq
->src
;
1801 if (to_hash_later
) {
1802 int nents
= sg_count(areq
->src
, nbytes
, &chained
);
1803 sg_copy_end_to_buffer(areq
->src
, nents
,
1806 nbytes
- to_hash_later
);
1808 req_ctx
->to_hash_later
= to_hash_later
;
1810 /* Allocate extended descriptor */
1811 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
1813 return PTR_ERR(edesc
);
1815 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
1817 /* On last one, request SEC to pad; otherwise continue */
1819 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
1821 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
1823 /* request SEC to INIT hash. */
1824 if (req_ctx
->first
&& !req_ctx
->swinit
)
1825 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
1827 /* When the tfm context has a keylen, it's an HMAC.
1828 * A first or last (ie. not middle) descriptor must request HMAC.
1830 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
1831 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
1833 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
1837 static int ahash_update(struct ahash_request
*areq
)
1839 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1843 return ahash_process_req(areq
, areq
->nbytes
);
1846 static int ahash_final(struct ahash_request
*areq
)
1848 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1852 return ahash_process_req(areq
, 0);
1855 static int ahash_finup(struct ahash_request
*areq
)
1857 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1861 return ahash_process_req(areq
, areq
->nbytes
);
1864 static int ahash_digest(struct ahash_request
*areq
)
1866 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1867 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
1872 return ahash_process_req(areq
, areq
->nbytes
);
1875 struct keyhash_result
{
1876 struct completion completion
;
1880 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
1882 struct keyhash_result
*res
= req
->data
;
1884 if (err
== -EINPROGRESS
)
1888 complete(&res
->completion
);
1891 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
1894 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1896 struct scatterlist sg
[1];
1897 struct ahash_request
*req
;
1898 struct keyhash_result hresult
;
1901 init_completion(&hresult
.completion
);
1903 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1907 /* Keep tfm keylen == 0 during hash of the long key */
1909 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
1910 keyhash_complete
, &hresult
);
1912 sg_init_one(&sg
[0], key
, keylen
);
1914 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
1915 ret
= crypto_ahash_digest(req
);
1921 ret
= wait_for_completion_interruptible(
1922 &hresult
.completion
);
1929 ahash_request_free(req
);
1934 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1935 unsigned int keylen
)
1937 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1938 unsigned int blocksize
=
1939 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1940 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
1941 unsigned int keysize
= keylen
;
1942 u8 hash
[SHA512_DIGEST_SIZE
];
1945 if (keylen
<= blocksize
)
1946 memcpy(ctx
->key
, key
, keysize
);
1948 /* Must get the hash of the long key */
1949 ret
= keyhash(tfm
, key
, keylen
, hash
);
1952 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1956 keysize
= digestsize
;
1957 memcpy(ctx
->key
, hash
, digestsize
);
1960 ctx
->keylen
= keysize
;
1966 struct talitos_alg_template
{
1969 struct crypto_alg crypto
;
1970 struct ahash_alg hash
;
1972 __be32 desc_hdr_template
;
1975 static struct talitos_alg_template driver_algs
[] = {
1976 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1977 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1979 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1980 .cra_driver_name
= "authenc-hmac-sha1-cbc-aes-talitos",
1981 .cra_blocksize
= AES_BLOCK_SIZE
,
1982 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1984 .ivsize
= AES_BLOCK_SIZE
,
1985 .maxauthsize
= SHA1_DIGEST_SIZE
,
1988 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
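/*
 * Each AEAD template above pairs a cipher execution unit (AESU for AES, DEU
 * for 3DES) with a message digest unit (MDEU A, or MDEU B for SHA-384/512)
 * in a single IPSEC_ESP descriptor.
 */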
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
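/*
 * Runtime wrapper around one driver_algs[] template: records the device that
 * registered it and links into priv->alg_list so talitos_remove() can
 * unregister and free it later.
 */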
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
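/*
 * The AEAD and AHASH init variants below run the common init first; the AEAD
 * one additionally seeds ctx->iv for givencrypt, and the AHASH one reserves
 * per-request state via crypto_ahash_set_reqsize().
 */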
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
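/*
 * Teardown mirrors probe in reverse: unregister all algorithms on
 * priv->alg_list, drop the hwrng if it was registered, then release the
 * per-channel fifos, IRQs, tasklets and the register mapping.
 */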
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}
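/*
 * Allocate a talitos_crypto_alg, copy in the template and hook up the
 * type-specific entry points.  HMAC hashes are refused with -ENOTSUPP on
 * parts without TALITOS_FTR_HMAC_OK; sha224 falls back to a software-init
 * variant when TALITOS_FTR_SHA224_HWINIT is absent.
 */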
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
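/*
 * Map the IRQ line(s) described in the device tree: a single line services
 * all four channels from one handler, while two lines split the channels
 * into the 0/2 and 1/3 pairs, each with its own handler and done tasklet.
 */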
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
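/*
 * Probe sequence: allocate the private state, map IRQs and registers, read
 * the channel and execution-unit capabilities from the device tree, allocate
 * the per-channel request fifos, reset the hardware, then register the hwrng
 * and every driver_algs[] entry that hw_supports() reports as available.
 * Any failure unwinds through talitos_remove().
 */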
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	INIT_LIST_HEAD(&priv->alg_list);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");