/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

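/*
 * SEC1 and SEC2+ lay out h/w descriptor pointers differently: SEC1 has
 * only a 32-bit pointer plus a 16-bit length (len1), while SEC2+ adds an
 * extent byte (j_extent) and an eptr field carrying the upper bits of a
 * 36-bit bus address.  The helpers below hide that difference from the
 * rest of the driver.
 */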
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

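/*
 * On SEC1, a request that chained a second descriptor (next_desc set) has
 * its completion status in that second descriptor, which lives at the end
 * of the edesc buffer; fetch the header from wherever it really is.
 */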
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
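/*
 * Note the inverted IMR polarity: SEC1 unmasks done interrupts with
 * clrbits32() (its interrupt handler masks them with setbits32()),
 * while SEC2+ does the opposite in the macros below.
 */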
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY 3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

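/*
 * buf[2] double-buffers partial blocks between requests: while the
 * hardware hashes one buffer, leftover bytes (to_hash_later) are staged
 * in the other; buf_idx flips in the done callback.
 */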
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}

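/*
 * Point a descriptor pointer at source data: a single DMA segment maps
 * directly, SEC1 uses the linear bounce buffer the data was copied into,
 * and SEC2+ builds a link table and tags the pointer with LNKTBL_JUMP.
 */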
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

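	/*
	 * SEC1 with both buffered bytes and fresh data: chain a second
	 * descriptor.  The first one hashes the buffered partial block
	 * without final padding, the chained one continues (no re-init)
	 * over the remaining data and performs any padding.
	 */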
1820 if (is_sec1 && req_ctx->nbuf && length) {
1821 struct talitos_desc *desc2 = (struct talitos_desc *)
1822 (edesc->buf + edesc->dma_len);
1823 dma_addr_t next_desc;
1824
1825 memset(desc2, 0, sizeof(*desc2));
1826 desc2->hdr = desc->hdr;
1827 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1828 desc2->hdr1 = desc2->hdr;
1829 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1830 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1831 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1832
1833 if (desc->ptr[1].ptr)
1834 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1835 is_sec1);
1836 else
1837 map_single_talitos_ptr(dev, &desc2->ptr[1],
1838 req_ctx->hw_context_size,
1839 req_ctx->hw_context,
1840 DMA_TO_DEVICE);
1841 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1842 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1843 &desc2->ptr[3], sg_count, 0, 0);
1844 if (sg_count > 1)
1845 sync_needed = true;
1846 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1847 if (req_ctx->last)
1848 map_single_talitos_ptr(dev, &desc->ptr[5],
1849 req_ctx->hw_context_size,
1850 req_ctx->hw_context,
1851 DMA_FROM_DEVICE);
1852
1853 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1854 DMA_BIDIRECTIONAL);
1855 desc->next_desc = cpu_to_be32(next_desc);
1856 }
1857
1858 if (sync_needed)
1859 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1860 edesc->dma_len, DMA_BIDIRECTIONAL);
1861
1862 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1863 if (ret != -EINPROGRESS) {
1864 common_nonsnoop_hash_unmap(dev, edesc, areq);
1865 kfree(edesc);
1866 }
1867 return ret;
1868 }
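/*
 * For reference, the descriptor layout built above (as read from the
 * code): ptr[0] unused, ptr[1] hash context in, ptr[2] HMAC key,
 * ptr[3] data in, ptr[4] unused, ptr[5] digest or hash context out,
 * ptr[6] unused. On SEC1, when both buffered bytes and fresh data must
 * be hashed in one request, a second descriptor is chained via next_desc.
 */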
1869
1870 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1871 unsigned int nbytes)
1872 {
1873 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1874 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1875 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1876 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1877 bool is_sec1 = has_ftr_sec1(priv);
1878
1879 if (is_sec1)
1880 nbytes -= req_ctx->nbuf;
1881
1882 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1883 nbytes, 0, 0, 0, areq->base.flags, false);
1884 }
1885
1886 static int ahash_init(struct ahash_request *areq)
1887 {
1888 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1889 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1890 unsigned int size;
1891
1892 /* Initialize the context */
1893 req_ctx->buf_idx = 0;
1894 req_ctx->nbuf = 0;
1895 req_ctx->first = 1; /* first indicates h/w must init its context */
1896 req_ctx->swinit = 0; /* assume h/w init of context */
1897 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1898 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1899 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1900 req_ctx->hw_context_size = size;
1901
1902 return 0;
1903 }
1904
1905 /*
1906 * on h/w without explicit sha224 support, we initialize h/w context
1907 * manually with sha224 constants, and tell it to run sha256.
1908 */
1909 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1910 {
1911 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1912
1913 ahash_init(areq);
1914 req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1915
1916 req_ctx->hw_context[0] = SHA224_H0;
1917 req_ctx->hw_context[1] = SHA224_H1;
1918 req_ctx->hw_context[2] = SHA224_H2;
1919 req_ctx->hw_context[3] = SHA224_H3;
1920 req_ctx->hw_context[4] = SHA224_H4;
1921 req_ctx->hw_context[5] = SHA224_H5;
1922 req_ctx->hw_context[6] = SHA224_H6;
1923 req_ctx->hw_context[7] = SHA224_H7;
1924
1925 /* init 64-bit count */
1926 req_ctx->hw_context[8] = 0;
1927 req_ctx->hw_context[9] = 0;
1928
1929 return 0;
1930 }
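/*
 * The eight words loaded above are the standard SHA-224 initial hash
 * values from FIPS 180, and hw_context[8..9] holds the engine's 64-bit
 * message length count, started at zero.
 */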
1931
1932 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1933 {
1934 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1935 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1936 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1937 struct talitos_edesc *edesc;
1938 unsigned int blocksize =
1939 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1940 unsigned int nbytes_to_hash;
1941 unsigned int to_hash_later;
1942 unsigned int nsg;
1943 int nents;
1944 struct device *dev = ctx->dev;
1945 struct talitos_private *priv = dev_get_drvdata(dev);
1946 bool is_sec1 = has_ftr_sec1(priv);
1947 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1948
1949 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1950 /* Buffer up to one whole block */
1951 nents = sg_nents_for_len(areq->src, nbytes);
1952 if (nents < 0) {
1953 dev_err(ctx->dev, "Invalid number of src SG.\n");
1954 return nents;
1955 }
1956 sg_copy_to_buffer(areq->src, nents,
1957 ctx_buf + req_ctx->nbuf, nbytes);
1958 req_ctx->nbuf += nbytes;
1959 return 0;
1960 }
1961
1962 /* At least (blocksize + 1) bytes are available to hash */
1963 nbytes_to_hash = nbytes + req_ctx->nbuf;
1964 to_hash_later = nbytes_to_hash & (blocksize - 1);
1965
1966 if (req_ctx->last)
1967 to_hash_later = 0;
1968 else if (to_hash_later)
1969 /* There is a partial block. Hash the full block(s) now */
1970 nbytes_to_hash -= to_hash_later;
1971 else {
1972 /* Keep one block buffered */
1973 nbytes_to_hash -= blocksize;
1974 to_hash_later = blocksize;
1975 }
1976
1977 /* Chain in any previously buffered data */
1978 if (!is_sec1 && req_ctx->nbuf) {
1979 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1980 sg_init_table(req_ctx->bufsl, nsg);
1981 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
1982 if (nsg > 1)
1983 sg_chain(req_ctx->bufsl, 2, areq->src);
1984 req_ctx->psrc = req_ctx->bufsl;
1985 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
1986 int offset;
1987
1988 if (nbytes_to_hash > blocksize)
1989 offset = blocksize - req_ctx->nbuf;
1990 else
1991 offset = nbytes_to_hash - req_ctx->nbuf;
1992 nents = sg_nents_for_len(areq->src, offset);
1993 if (nents < 0) {
1994 dev_err(ctx->dev, "Invalid number of src SG.\n");
1995 return nents;
1996 }
1997 sg_copy_to_buffer(areq->src, nents,
1998 ctx_buf + req_ctx->nbuf, offset);
1999 req_ctx->nbuf += offset;
2000 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2001 offset);
2002 } else
2003 req_ctx->psrc = areq->src;
2004
2005 if (to_hash_later) {
2006 nents = sg_nents_for_len(areq->src, nbytes);
2007 if (nents < 0) {
2008 dev_err(ctx->dev, "Invalid number of src SG.\n");
2009 return nents;
2010 }
2011 sg_pcopy_to_buffer(areq->src, nents,
2012 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2013 to_hash_later,
2014 nbytes - to_hash_later);
2015 }
2016 req_ctx->to_hash_later = to_hash_later;
2017
2018 /* Allocate extended descriptor */
2019 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2020 if (IS_ERR(edesc))
2021 return PTR_ERR(edesc);
2022
2023 edesc->desc.hdr = ctx->desc_hdr_template;
2024
2025 /* On last one, request SEC to pad; otherwise continue */
2026 if (req_ctx->last)
2027 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2028 else
2029 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2030
2031 /* request SEC to INIT hash. */
2032 if (req_ctx->first && !req_ctx->swinit)
2033 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2034
2035 /* When the tfm context has a keylen, it's an HMAC.
2036 * A first or last (ie. not middle) descriptor must request HMAC.
2037 */
2038 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2039 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2040
2041 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2042 }
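/*
 * Worked example of the split above (illustrative): with a 64-byte
 * blocksize, 10 buffered bytes and a 100-byte update, nbytes_to_hash is
 * 110 and to_hash_later is 110 & 63 = 46, so 64 bytes are hashed now and
 * 46 are copied to the alternate buffer. When the total is an exact
 * multiple of the blocksize and this is not the final request, one full
 * block is held back instead, so the final operation never sees an empty
 * message.
 */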
2043
2044 static int ahash_update(struct ahash_request *areq)
2045 {
2046 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2047
2048 req_ctx->last = 0;
2049
2050 return ahash_process_req(areq, areq->nbytes);
2051 }
2052
2053 static int ahash_final(struct ahash_request *areq)
2054 {
2055 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2056
2057 req_ctx->last = 1;
2058
2059 return ahash_process_req(areq, 0);
2060 }
2061
2062 static int ahash_finup(struct ahash_request *areq)
2063 {
2064 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2065
2066 req_ctx->last = 1;
2067
2068 return ahash_process_req(areq, areq->nbytes);
2069 }
2070
2071 static int ahash_digest(struct ahash_request *areq)
2072 {
2073 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2074 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2075
2076 ahash->init(areq);
2077 req_ctx->last = 1;
2078
2079 return ahash_process_req(areq, areq->nbytes);
2080 }
2081
2082 static int ahash_export(struct ahash_request *areq, void *out)
2083 {
2084 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2085 struct talitos_export_state *export = out;
2086
2087 memcpy(export->hw_context, req_ctx->hw_context,
2088 req_ctx->hw_context_size);
2089 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2090 export->swinit = req_ctx->swinit;
2091 export->first = req_ctx->first;
2092 export->last = req_ctx->last;
2093 export->to_hash_later = req_ctx->to_hash_later;
2094 export->nbuf = req_ctx->nbuf;
2095
2096 return 0;
2097 }
2098
2099 static int ahash_import(struct ahash_request *areq, const void *in)
2100 {
2101 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2102 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2103 const struct talitos_export_state *export = in;
2104 unsigned int size;
2105
2106 memset(req_ctx, 0, sizeof(*req_ctx));
2107 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2108 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2109 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2110 req_ctx->hw_context_size = size;
2111 memcpy(req_ctx->hw_context, export->hw_context, size);
2112 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2113 req_ctx->swinit = export->swinit;
2114 req_ctx->first = export->first;
2115 req_ctx->last = export->last;
2116 req_ctx->to_hash_later = export->to_hash_later;
2117 req_ctx->nbuf = export->nbuf;
2118
2119 return 0;
2120 }
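/*
 * Illustrative sketch (not part of the driver): a generic caller
 * exercises the export/import pair above through the crypto API; the
 * state buffer must be crypto_ahash_statesize() bytes, which this driver
 * declares as sizeof(struct talitos_export_state).
 */
#if 0
static int save_and_resume(struct ahash_request *req)
{
	u8 state[sizeof(struct talitos_export_state)];
	int ret;

	ret = crypto_ahash_export(req, state);	/* snapshot partial hash */
	if (ret)
		return ret;
	return crypto_ahash_import(req, state);	/* resume from snapshot */
}
#endif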
2121
2122 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2123 u8 *hash)
2124 {
2125 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2126
2127 struct scatterlist sg[1];
2128 struct ahash_request *req;
2129 struct crypto_wait wait;
2130 int ret;
2131
2132 crypto_init_wait(&wait);
2133
2134 req = ahash_request_alloc(tfm, GFP_KERNEL);
2135 if (!req)
2136 return -ENOMEM;
2137
2138 /* Keep tfm keylen == 0 during hash of the long key */
2139 ctx->keylen = 0;
2140 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2141 crypto_req_done, &wait);
2142
2143 sg_init_one(&sg[0], key, keylen);
2144
2145 ahash_request_set_crypt(req, sg, hash, keylen);
2146 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2147
2148 ahash_request_free(req);
2149
2150 return ret;
2151 }
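/*
 * keyhash() above uses the standard synchronous-wait idiom: the digest
 * completes asynchronously through crypto_req_done(), and
 * crypto_wait_req() converts -EINPROGRESS/-EBUSY into a blocking wait.
 */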
2152
2153 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2154 unsigned int keylen)
2155 {
2156 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2157 struct device *dev = ctx->dev;
2158 unsigned int blocksize =
2159 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2160 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2161 unsigned int keysize = keylen;
2162 u8 hash[SHA512_DIGEST_SIZE];
2163 int ret;
2164
2165 if (keylen <= blocksize)
2166 memcpy(ctx->key, key, keysize);
2167 else {
2168 /* Must get the hash of the long key */
2169 ret = keyhash(tfm, key, keylen, hash);
2170
2171 if (ret) {
2172 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2173 return -EINVAL;
2174 }
2175
2176 keysize = digestsize;
2177 memcpy(ctx->key, hash, digestsize);
2178 }
2179
2180 if (ctx->keylen)
2181 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2182
2183 ctx->keylen = keysize;
2184 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2185
2186 return 0;
2187 }
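/*
 * Illustrative sketch (hypothetical caller): per RFC 2104, an HMAC key
 * longer than the block size is replaced by its digest, which is exactly
 * what ahash_setkey() does above, so callers may pass any key length:
 */
#if 0
static int set_long_hmac_key(struct crypto_ahash *tfm)
{
	static const u8 long_key[200] = { 0x0b };	/* example key */

	return crypto_ahash_setkey(tfm, long_key, sizeof(long_key));
}
#endif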
2188
2189
2190 struct talitos_alg_template {
2191 u32 type;
2192 u32 priority;
2193 union {
2194 struct crypto_alg crypto;
2195 struct ahash_alg hash;
2196 struct aead_alg aead;
2197 } alg;
2198 __be32 desc_hdr_template;
2199 };
2200
2201 static struct talitos_alg_template driver_algs[] = {
2202 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2203 { .type = CRYPTO_ALG_TYPE_AEAD,
2204 .alg.aead = {
2205 .base = {
2206 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2207 .cra_driver_name = "authenc-hmac-sha1-"
2208 "cbc-aes-talitos-hsna",
2209 .cra_blocksize = AES_BLOCK_SIZE,
2210 .cra_flags = CRYPTO_ALG_ASYNC,
2211 },
2212 .ivsize = AES_BLOCK_SIZE,
2213 .maxauthsize = SHA1_DIGEST_SIZE,
2214 },
2215 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2216 DESC_HDR_SEL0_AESU |
2217 DESC_HDR_MODE0_AESU_CBC |
2218 DESC_HDR_SEL1_MDEUA |
2219 DESC_HDR_MODE1_MDEU_INIT |
2220 DESC_HDR_MODE1_MDEU_PAD |
2221 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2222 },
2223 { .type = CRYPTO_ALG_TYPE_AEAD,
2224 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2225 .alg.aead = {
2226 .base = {
2227 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2228 .cra_driver_name = "authenc-hmac-sha1-"
2229 "cbc-aes-talitos",
2230 .cra_blocksize = AES_BLOCK_SIZE,
2231 .cra_flags = CRYPTO_ALG_ASYNC,
2232 },
2233 .ivsize = AES_BLOCK_SIZE,
2234 .maxauthsize = SHA1_DIGEST_SIZE,
2235 },
2236 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2237 DESC_HDR_SEL0_AESU |
2238 DESC_HDR_MODE0_AESU_CBC |
2239 DESC_HDR_SEL1_MDEUA |
2240 DESC_HDR_MODE1_MDEU_INIT |
2241 DESC_HDR_MODE1_MDEU_PAD |
2242 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2243 },
2244 { .type = CRYPTO_ALG_TYPE_AEAD,
2245 .alg.aead = {
2246 .base = {
2247 .cra_name = "authenc(hmac(sha1),"
2248 "cbc(des3_ede))",
2249 .cra_driver_name = "authenc-hmac-sha1-"
2250 "cbc-3des-talitos-hsna",
2251 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2252 .cra_flags = CRYPTO_ALG_ASYNC,
2253 },
2254 .ivsize = DES3_EDE_BLOCK_SIZE,
2255 .maxauthsize = SHA1_DIGEST_SIZE,
2256 },
2257 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2258 DESC_HDR_SEL0_DEU |
2259 DESC_HDR_MODE0_DEU_CBC |
2260 DESC_HDR_MODE0_DEU_3DES |
2261 DESC_HDR_SEL1_MDEUA |
2262 DESC_HDR_MODE1_MDEU_INIT |
2263 DESC_HDR_MODE1_MDEU_PAD |
2264 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2265 },
2266 { .type = CRYPTO_ALG_TYPE_AEAD,
2267 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2268 .alg.aead = {
2269 .base = {
2270 .cra_name = "authenc(hmac(sha1),"
2271 "cbc(des3_ede))",
2272 .cra_driver_name = "authenc-hmac-sha1-"
2273 "cbc-3des-talitos",
2274 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2275 .cra_flags = CRYPTO_ALG_ASYNC,
2276 },
2277 .ivsize = DES3_EDE_BLOCK_SIZE,
2278 .maxauthsize = SHA1_DIGEST_SIZE,
2279 },
2280 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2281 DESC_HDR_SEL0_DEU |
2282 DESC_HDR_MODE0_DEU_CBC |
2283 DESC_HDR_MODE0_DEU_3DES |
2284 DESC_HDR_SEL1_MDEUA |
2285 DESC_HDR_MODE1_MDEU_INIT |
2286 DESC_HDR_MODE1_MDEU_PAD |
2287 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2288 },
2289 { .type = CRYPTO_ALG_TYPE_AEAD,
2290 .alg.aead = {
2291 .base = {
2292 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2293 .cra_driver_name = "authenc-hmac-sha224-"
2294 "cbc-aes-talitos-hsna",
2295 .cra_blocksize = AES_BLOCK_SIZE,
2296 .cra_flags = CRYPTO_ALG_ASYNC,
2297 },
2298 .ivsize = AES_BLOCK_SIZE,
2299 .maxauthsize = SHA224_DIGEST_SIZE,
2300 },
2301 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2302 DESC_HDR_SEL0_AESU |
2303 DESC_HDR_MODE0_AESU_CBC |
2304 DESC_HDR_SEL1_MDEUA |
2305 DESC_HDR_MODE1_MDEU_INIT |
2306 DESC_HDR_MODE1_MDEU_PAD |
2307 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2308 },
2309 { .type = CRYPTO_ALG_TYPE_AEAD,
2310 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2311 .alg.aead = {
2312 .base = {
2313 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2314 .cra_driver_name = "authenc-hmac-sha224-"
2315 "cbc-aes-talitos",
2316 .cra_blocksize = AES_BLOCK_SIZE,
2317 .cra_flags = CRYPTO_ALG_ASYNC,
2318 },
2319 .ivsize = AES_BLOCK_SIZE,
2320 .maxauthsize = SHA224_DIGEST_SIZE,
2321 },
2322 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2323 DESC_HDR_SEL0_AESU |
2324 DESC_HDR_MODE0_AESU_CBC |
2325 DESC_HDR_SEL1_MDEUA |
2326 DESC_HDR_MODE1_MDEU_INIT |
2327 DESC_HDR_MODE1_MDEU_PAD |
2328 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2329 },
2330 { .type = CRYPTO_ALG_TYPE_AEAD,
2331 .alg.aead = {
2332 .base = {
2333 .cra_name = "authenc(hmac(sha224),"
2334 "cbc(des3_ede))",
2335 .cra_driver_name = "authenc-hmac-sha224-"
2336 "cbc-3des-talitos-hsna",
2337 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2338 .cra_flags = CRYPTO_ALG_ASYNC,
2339 },
2340 .ivsize = DES3_EDE_BLOCK_SIZE,
2341 .maxauthsize = SHA224_DIGEST_SIZE,
2342 },
2343 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2344 DESC_HDR_SEL0_DEU |
2345 DESC_HDR_MODE0_DEU_CBC |
2346 DESC_HDR_MODE0_DEU_3DES |
2347 DESC_HDR_SEL1_MDEUA |
2348 DESC_HDR_MODE1_MDEU_INIT |
2349 DESC_HDR_MODE1_MDEU_PAD |
2350 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2351 },
2352 { .type = CRYPTO_ALG_TYPE_AEAD,
2353 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2354 .alg.aead = {
2355 .base = {
2356 .cra_name = "authenc(hmac(sha224),"
2357 "cbc(des3_ede))",
2358 .cra_driver_name = "authenc-hmac-sha224-"
2359 "cbc-3des-talitos",
2360 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2361 .cra_flags = CRYPTO_ALG_ASYNC,
2362 },
2363 .ivsize = DES3_EDE_BLOCK_SIZE,
2364 .maxauthsize = SHA224_DIGEST_SIZE,
2365 },
2366 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2367 DESC_HDR_SEL0_DEU |
2368 DESC_HDR_MODE0_DEU_CBC |
2369 DESC_HDR_MODE0_DEU_3DES |
2370 DESC_HDR_SEL1_MDEUA |
2371 DESC_HDR_MODE1_MDEU_INIT |
2372 DESC_HDR_MODE1_MDEU_PAD |
2373 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2374 },
2375 { .type = CRYPTO_ALG_TYPE_AEAD,
2376 .alg.aead = {
2377 .base = {
2378 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2379 .cra_driver_name = "authenc-hmac-sha256-"
2380 "cbc-aes-talitos-hsna",
2381 .cra_blocksize = AES_BLOCK_SIZE,
2382 .cra_flags = CRYPTO_ALG_ASYNC,
2383 },
2384 .ivsize = AES_BLOCK_SIZE,
2385 .maxauthsize = SHA256_DIGEST_SIZE,
2386 },
2387 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2388 DESC_HDR_SEL0_AESU |
2389 DESC_HDR_MODE0_AESU_CBC |
2390 DESC_HDR_SEL1_MDEUA |
2391 DESC_HDR_MODE1_MDEU_INIT |
2392 DESC_HDR_MODE1_MDEU_PAD |
2393 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2394 },
2395 { .type = CRYPTO_ALG_TYPE_AEAD,
2396 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2397 .alg.aead = {
2398 .base = {
2399 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2400 .cra_driver_name = "authenc-hmac-sha256-"
2401 "cbc-aes-talitos",
2402 .cra_blocksize = AES_BLOCK_SIZE,
2403 .cra_flags = CRYPTO_ALG_ASYNC,
2404 },
2405 .ivsize = AES_BLOCK_SIZE,
2406 .maxauthsize = SHA256_DIGEST_SIZE,
2407 },
2408 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2409 DESC_HDR_SEL0_AESU |
2410 DESC_HDR_MODE0_AESU_CBC |
2411 DESC_HDR_SEL1_MDEUA |
2412 DESC_HDR_MODE1_MDEU_INIT |
2413 DESC_HDR_MODE1_MDEU_PAD |
2414 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2415 },
2416 { .type = CRYPTO_ALG_TYPE_AEAD,
2417 .alg.aead = {
2418 .base = {
2419 .cra_name = "authenc(hmac(sha256),"
2420 "cbc(des3_ede))",
2421 .cra_driver_name = "authenc-hmac-sha256-"
2422 "cbc-3des-talitos-hsna",
2423 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2424 .cra_flags = CRYPTO_ALG_ASYNC,
2425 },
2426 .ivsize = DES3_EDE_BLOCK_SIZE,
2427 .maxauthsize = SHA256_DIGEST_SIZE,
2428 },
2429 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2430 DESC_HDR_SEL0_DEU |
2431 DESC_HDR_MODE0_DEU_CBC |
2432 DESC_HDR_MODE0_DEU_3DES |
2433 DESC_HDR_SEL1_MDEUA |
2434 DESC_HDR_MODE1_MDEU_INIT |
2435 DESC_HDR_MODE1_MDEU_PAD |
2436 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2437 },
2438 { .type = CRYPTO_ALG_TYPE_AEAD,
2439 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2440 .alg.aead = {
2441 .base = {
2442 .cra_name = "authenc(hmac(sha256),"
2443 "cbc(des3_ede))",
2444 .cra_driver_name = "authenc-hmac-sha256-"
2445 "cbc-3des-talitos",
2446 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2447 .cra_flags = CRYPTO_ALG_ASYNC,
2448 },
2449 .ivsize = DES3_EDE_BLOCK_SIZE,
2450 .maxauthsize = SHA256_DIGEST_SIZE,
2451 },
2452 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2453 DESC_HDR_SEL0_DEU |
2454 DESC_HDR_MODE0_DEU_CBC |
2455 DESC_HDR_MODE0_DEU_3DES |
2456 DESC_HDR_SEL1_MDEUA |
2457 DESC_HDR_MODE1_MDEU_INIT |
2458 DESC_HDR_MODE1_MDEU_PAD |
2459 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2460 },
2461 { .type = CRYPTO_ALG_TYPE_AEAD,
2462 .alg.aead = {
2463 .base = {
2464 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2465 .cra_driver_name = "authenc-hmac-sha384-"
2466 "cbc-aes-talitos",
2467 .cra_blocksize = AES_BLOCK_SIZE,
2468 .cra_flags = CRYPTO_ALG_ASYNC,
2469 },
2470 .ivsize = AES_BLOCK_SIZE,
2471 .maxauthsize = SHA384_DIGEST_SIZE,
2472 },
2473 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2474 DESC_HDR_SEL0_AESU |
2475 DESC_HDR_MODE0_AESU_CBC |
2476 DESC_HDR_SEL1_MDEUB |
2477 DESC_HDR_MODE1_MDEU_INIT |
2478 DESC_HDR_MODE1_MDEU_PAD |
2479 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2480 },
2481 { .type = CRYPTO_ALG_TYPE_AEAD,
2482 .alg.aead = {
2483 .base = {
2484 .cra_name = "authenc(hmac(sha384),"
2485 "cbc(des3_ede))",
2486 .cra_driver_name = "authenc-hmac-sha384-"
2487 "cbc-3des-talitos",
2488 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2489 .cra_flags = CRYPTO_ALG_ASYNC,
2490 },
2491 .ivsize = DES3_EDE_BLOCK_SIZE,
2492 .maxauthsize = SHA384_DIGEST_SIZE,
2493 },
2494 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2495 DESC_HDR_SEL0_DEU |
2496 DESC_HDR_MODE0_DEU_CBC |
2497 DESC_HDR_MODE0_DEU_3DES |
2498 DESC_HDR_SEL1_MDEUB |
2499 DESC_HDR_MODE1_MDEU_INIT |
2500 DESC_HDR_MODE1_MDEU_PAD |
2501 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2502 },
2503 { .type = CRYPTO_ALG_TYPE_AEAD,
2504 .alg.aead = {
2505 .base = {
2506 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2507 .cra_driver_name = "authenc-hmac-sha512-"
2508 "cbc-aes-talitos",
2509 .cra_blocksize = AES_BLOCK_SIZE,
2510 .cra_flags = CRYPTO_ALG_ASYNC,
2511 },
2512 .ivsize = AES_BLOCK_SIZE,
2513 .maxauthsize = SHA512_DIGEST_SIZE,
2514 },
2515 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2516 DESC_HDR_SEL0_AESU |
2517 DESC_HDR_MODE0_AESU_CBC |
2518 DESC_HDR_SEL1_MDEUB |
2519 DESC_HDR_MODE1_MDEU_INIT |
2520 DESC_HDR_MODE1_MDEU_PAD |
2521 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2522 },
2523 { .type = CRYPTO_ALG_TYPE_AEAD,
2524 .alg.aead = {
2525 .base = {
2526 .cra_name = "authenc(hmac(sha512),"
2527 "cbc(des3_ede))",
2528 .cra_driver_name = "authenc-hmac-sha512-"
2529 "cbc-3des-talitos",
2530 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2531 .cra_flags = CRYPTO_ALG_ASYNC,
2532 },
2533 .ivsize = DES3_EDE_BLOCK_SIZE,
2534 .maxauthsize = SHA512_DIGEST_SIZE,
2535 },
2536 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2537 DESC_HDR_SEL0_DEU |
2538 DESC_HDR_MODE0_DEU_CBC |
2539 DESC_HDR_MODE0_DEU_3DES |
2540 DESC_HDR_SEL1_MDEUB |
2541 DESC_HDR_MODE1_MDEU_INIT |
2542 DESC_HDR_MODE1_MDEU_PAD |
2543 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2544 },
2545 { .type = CRYPTO_ALG_TYPE_AEAD,
2546 .alg.aead = {
2547 .base = {
2548 .cra_name = "authenc(hmac(md5),cbc(aes))",
2549 .cra_driver_name = "authenc-hmac-md5-"
2550 "cbc-aes-talitos-hsna",
2551 .cra_blocksize = AES_BLOCK_SIZE,
2552 .cra_flags = CRYPTO_ALG_ASYNC,
2553 },
2554 .ivsize = AES_BLOCK_SIZE,
2555 .maxauthsize = MD5_DIGEST_SIZE,
2556 },
2557 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2558 DESC_HDR_SEL0_AESU |
2559 DESC_HDR_MODE0_AESU_CBC |
2560 DESC_HDR_SEL1_MDEUA |
2561 DESC_HDR_MODE1_MDEU_INIT |
2562 DESC_HDR_MODE1_MDEU_PAD |
2563 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2564 },
2565 { .type = CRYPTO_ALG_TYPE_AEAD,
2566 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2567 .alg.aead = {
2568 .base = {
2569 .cra_name = "authenc(hmac(md5),cbc(aes))",
2570 .cra_driver_name = "authenc-hmac-md5-"
2571 "cbc-aes-talitos",
2572 .cra_blocksize = AES_BLOCK_SIZE,
2573 .cra_flags = CRYPTO_ALG_ASYNC,
2574 },
2575 .ivsize = AES_BLOCK_SIZE,
2576 .maxauthsize = MD5_DIGEST_SIZE,
2577 },
2578 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2579 DESC_HDR_SEL0_AESU |
2580 DESC_HDR_MODE0_AESU_CBC |
2581 DESC_HDR_SEL1_MDEUA |
2582 DESC_HDR_MODE1_MDEU_INIT |
2583 DESC_HDR_MODE1_MDEU_PAD |
2584 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2585 },
2586 { .type = CRYPTO_ALG_TYPE_AEAD,
2587 .alg.aead = {
2588 .base = {
2589 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2590 .cra_driver_name = "authenc-hmac-md5-"
2591 "cbc-3des-talitos-hsna",
2592 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2593 .cra_flags = CRYPTO_ALG_ASYNC,
2594 },
2595 .ivsize = DES3_EDE_BLOCK_SIZE,
2596 .maxauthsize = MD5_DIGEST_SIZE,
2597 },
2598 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599 DESC_HDR_SEL0_DEU |
2600 DESC_HDR_MODE0_DEU_CBC |
2601 DESC_HDR_MODE0_DEU_3DES |
2602 DESC_HDR_SEL1_MDEUA |
2603 DESC_HDR_MODE1_MDEU_INIT |
2604 DESC_HDR_MODE1_MDEU_PAD |
2605 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2606 },
2607 { .type = CRYPTO_ALG_TYPE_AEAD,
2608 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2609 .alg.aead = {
2610 .base = {
2611 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2612 .cra_driver_name = "authenc-hmac-md5-"
2613 "cbc-3des-talitos",
2614 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2615 .cra_flags = CRYPTO_ALG_ASYNC,
2616 },
2617 .ivsize = DES3_EDE_BLOCK_SIZE,
2618 .maxauthsize = MD5_DIGEST_SIZE,
2619 },
2620 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2621 DESC_HDR_SEL0_DEU |
2622 DESC_HDR_MODE0_DEU_CBC |
2623 DESC_HDR_MODE0_DEU_3DES |
2624 DESC_HDR_SEL1_MDEUA |
2625 DESC_HDR_MODE1_MDEU_INIT |
2626 DESC_HDR_MODE1_MDEU_PAD |
2627 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2628 },
2629 /* ABLKCIPHER algorithms. */
2630 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2631 .alg.crypto = {
2632 .cra_name = "ecb(aes)",
2633 .cra_driver_name = "ecb-aes-talitos",
2634 .cra_blocksize = AES_BLOCK_SIZE,
2635 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2636 CRYPTO_ALG_ASYNC,
2637 .cra_ablkcipher = {
2638 .min_keysize = AES_MIN_KEY_SIZE,
2639 .max_keysize = AES_MAX_KEY_SIZE,
2640 .ivsize = AES_BLOCK_SIZE,
2641 }
2642 },
2643 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2644 DESC_HDR_SEL0_AESU,
2645 },
2646 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2647 .alg.crypto = {
2648 .cra_name = "cbc(aes)",
2649 .cra_driver_name = "cbc-aes-talitos",
2650 .cra_blocksize = AES_BLOCK_SIZE,
2651 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2652 CRYPTO_ALG_ASYNC,
2653 .cra_ablkcipher = {
2654 .min_keysize = AES_MIN_KEY_SIZE,
2655 .max_keysize = AES_MAX_KEY_SIZE,
2656 .ivsize = AES_BLOCK_SIZE,
2657 }
2658 },
2659 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2660 DESC_HDR_SEL0_AESU |
2661 DESC_HDR_MODE0_AESU_CBC,
2662 },
2663 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2664 .alg.crypto = {
2665 .cra_name = "ctr(aes)",
2666 .cra_driver_name = "ctr-aes-talitos",
2667 .cra_blocksize = AES_BLOCK_SIZE,
2668 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2669 CRYPTO_ALG_ASYNC,
2670 .cra_ablkcipher = {
2671 .min_keysize = AES_MIN_KEY_SIZE,
2672 .max_keysize = AES_MAX_KEY_SIZE,
2673 .ivsize = AES_BLOCK_SIZE,
2674 }
2675 },
2676 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2677 DESC_HDR_SEL0_AESU |
2678 DESC_HDR_MODE0_AESU_CTR,
2679 },
2680 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2681 .alg.crypto = {
2682 .cra_name = "ecb(des)",
2683 .cra_driver_name = "ecb-des-talitos",
2684 .cra_blocksize = DES_BLOCK_SIZE,
2685 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2686 CRYPTO_ALG_ASYNC,
2687 .cra_ablkcipher = {
2688 .min_keysize = DES_KEY_SIZE,
2689 .max_keysize = DES_KEY_SIZE,
2690 .ivsize = DES_BLOCK_SIZE,
2691 }
2692 },
2693 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2694 DESC_HDR_SEL0_DEU,
2695 },
2696 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2697 .alg.crypto = {
2698 .cra_name = "cbc(des)",
2699 .cra_driver_name = "cbc-des-talitos",
2700 .cra_blocksize = DES_BLOCK_SIZE,
2701 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2702 CRYPTO_ALG_ASYNC,
2703 .cra_ablkcipher = {
2704 .min_keysize = DES_KEY_SIZE,
2705 .max_keysize = DES_KEY_SIZE,
2706 .ivsize = DES_BLOCK_SIZE,
2707 }
2708 },
2709 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2710 DESC_HDR_SEL0_DEU |
2711 DESC_HDR_MODE0_DEU_CBC,
2712 },
2713 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2714 .alg.crypto = {
2715 .cra_name = "ecb(des3_ede)",
2716 .cra_driver_name = "ecb-3des-talitos",
2717 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2718 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2719 CRYPTO_ALG_ASYNC,
2720 .cra_ablkcipher = {
2721 .min_keysize = DES3_EDE_KEY_SIZE,
2722 .max_keysize = DES3_EDE_KEY_SIZE,
2723 .ivsize = DES3_EDE_BLOCK_SIZE,
2724 }
2725 },
2726 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2727 DESC_HDR_SEL0_DEU |
2728 DESC_HDR_MODE0_DEU_3DES,
2729 },
2730 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2731 .alg.crypto = {
2732 .cra_name = "cbc(des3_ede)",
2733 .cra_driver_name = "cbc-3des-talitos",
2734 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2735 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2736 CRYPTO_ALG_ASYNC,
2737 .cra_ablkcipher = {
2738 .min_keysize = DES3_EDE_KEY_SIZE,
2739 .max_keysize = DES3_EDE_KEY_SIZE,
2740 .ivsize = DES3_EDE_BLOCK_SIZE,
2741 }
2742 },
2743 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744 DESC_HDR_SEL0_DEU |
2745 DESC_HDR_MODE0_DEU_CBC |
2746 DESC_HDR_MODE0_DEU_3DES,
2747 },
2748 /* AHASH algorithms. */
2749 { .type = CRYPTO_ALG_TYPE_AHASH,
2750 .alg.hash = {
2751 .halg.digestsize = MD5_DIGEST_SIZE,
2752 .halg.statesize = sizeof(struct talitos_export_state),
2753 .halg.base = {
2754 .cra_name = "md5",
2755 .cra_driver_name = "md5-talitos",
2756 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2757 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2758 CRYPTO_ALG_ASYNC,
2759 }
2760 },
2761 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2762 DESC_HDR_SEL0_MDEUA |
2763 DESC_HDR_MODE0_MDEU_MD5,
2764 },
2765 { .type = CRYPTO_ALG_TYPE_AHASH,
2766 .alg.hash = {
2767 .halg.digestsize = SHA1_DIGEST_SIZE,
2768 .halg.statesize = sizeof(struct talitos_export_state),
2769 .halg.base = {
2770 .cra_name = "sha1",
2771 .cra_driver_name = "sha1-talitos",
2772 .cra_blocksize = SHA1_BLOCK_SIZE,
2773 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2774 CRYPTO_ALG_ASYNC,
2775 }
2776 },
2777 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778 DESC_HDR_SEL0_MDEUA |
2779 DESC_HDR_MODE0_MDEU_SHA1,
2780 },
2781 { .type = CRYPTO_ALG_TYPE_AHASH,
2782 .alg.hash = {
2783 .halg.digestsize = SHA224_DIGEST_SIZE,
2784 .halg.statesize = sizeof(struct talitos_export_state),
2785 .halg.base = {
2786 .cra_name = "sha224",
2787 .cra_driver_name = "sha224-talitos",
2788 .cra_blocksize = SHA224_BLOCK_SIZE,
2789 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2790 CRYPTO_ALG_ASYNC,
2791 }
2792 },
2793 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2794 DESC_HDR_SEL0_MDEUA |
2795 DESC_HDR_MODE0_MDEU_SHA224,
2796 },
2797 { .type = CRYPTO_ALG_TYPE_AHASH,
2798 .alg.hash = {
2799 .halg.digestsize = SHA256_DIGEST_SIZE,
2800 .halg.statesize = sizeof(struct talitos_export_state),
2801 .halg.base = {
2802 .cra_name = "sha256",
2803 .cra_driver_name = "sha256-talitos",
2804 .cra_blocksize = SHA256_BLOCK_SIZE,
2805 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2806 CRYPTO_ALG_ASYNC,
2807 }
2808 },
2809 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 DESC_HDR_SEL0_MDEUA |
2811 DESC_HDR_MODE0_MDEU_SHA256,
2812 },
2813 { .type = CRYPTO_ALG_TYPE_AHASH,
2814 .alg.hash = {
2815 .halg.digestsize = SHA384_DIGEST_SIZE,
2816 .halg.statesize = sizeof(struct talitos_export_state),
2817 .halg.base = {
2818 .cra_name = "sha384",
2819 .cra_driver_name = "sha384-talitos",
2820 .cra_blocksize = SHA384_BLOCK_SIZE,
2821 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2822 CRYPTO_ALG_ASYNC,
2823 }
2824 },
2825 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2826 DESC_HDR_SEL0_MDEUB |
2827 DESC_HDR_MODE0_MDEUB_SHA384,
2828 },
2829 { .type = CRYPTO_ALG_TYPE_AHASH,
2830 .alg.hash = {
2831 .halg.digestsize = SHA512_DIGEST_SIZE,
2832 .halg.statesize = sizeof(struct talitos_export_state),
2833 .halg.base = {
2834 .cra_name = "sha512",
2835 .cra_driver_name = "sha512-talitos",
2836 .cra_blocksize = SHA512_BLOCK_SIZE,
2837 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2838 CRYPTO_ALG_ASYNC,
2839 }
2840 },
2841 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2842 DESC_HDR_SEL0_MDEUB |
2843 DESC_HDR_MODE0_MDEUB_SHA512,
2844 },
2845 { .type = CRYPTO_ALG_TYPE_AHASH,
2846 .alg.hash = {
2847 .halg.digestsize = MD5_DIGEST_SIZE,
2848 .halg.statesize = sizeof(struct talitos_export_state),
2849 .halg.base = {
2850 .cra_name = "hmac(md5)",
2851 .cra_driver_name = "hmac-md5-talitos",
2852 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2853 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2854 CRYPTO_ALG_ASYNC,
2855 }
2856 },
2857 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2858 DESC_HDR_SEL0_MDEUA |
2859 DESC_HDR_MODE0_MDEU_MD5,
2860 },
2861 { .type = CRYPTO_ALG_TYPE_AHASH,
2862 .alg.hash = {
2863 .halg.digestsize = SHA1_DIGEST_SIZE,
2864 .halg.statesize = sizeof(struct talitos_export_state),
2865 .halg.base = {
2866 .cra_name = "hmac(sha1)",
2867 .cra_driver_name = "hmac-sha1-talitos",
2868 .cra_blocksize = SHA1_BLOCK_SIZE,
2869 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2870 CRYPTO_ALG_ASYNC,
2871 }
2872 },
2873 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2874 DESC_HDR_SEL0_MDEUA |
2875 DESC_HDR_MODE0_MDEU_SHA1,
2876 },
2877 { .type = CRYPTO_ALG_TYPE_AHASH,
2878 .alg.hash = {
2879 .halg.digestsize = SHA224_DIGEST_SIZE,
2880 .halg.statesize = sizeof(struct talitos_export_state),
2881 .halg.base = {
2882 .cra_name = "hmac(sha224)",
2883 .cra_driver_name = "hmac-sha224-talitos",
2884 .cra_blocksize = SHA224_BLOCK_SIZE,
2885 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2886 CRYPTO_ALG_ASYNC,
2887 }
2888 },
2889 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2890 DESC_HDR_SEL0_MDEUA |
2891 DESC_HDR_MODE0_MDEU_SHA224,
2892 },
2893 { .type = CRYPTO_ALG_TYPE_AHASH,
2894 .alg.hash = {
2895 .halg.digestsize = SHA256_DIGEST_SIZE,
2896 .halg.statesize = sizeof(struct talitos_export_state),
2897 .halg.base = {
2898 .cra_name = "hmac(sha256)",
2899 .cra_driver_name = "hmac-sha256-talitos",
2900 .cra_blocksize = SHA256_BLOCK_SIZE,
2901 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2902 CRYPTO_ALG_ASYNC,
2903 }
2904 },
2905 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2906 DESC_HDR_SEL0_MDEUA |
2907 DESC_HDR_MODE0_MDEU_SHA256,
2908 },
2909 { .type = CRYPTO_ALG_TYPE_AHASH,
2910 .alg.hash = {
2911 .halg.digestsize = SHA384_DIGEST_SIZE,
2912 .halg.statesize = sizeof(struct talitos_export_state),
2913 .halg.base = {
2914 .cra_name = "hmac(sha384)",
2915 .cra_driver_name = "hmac-sha384-talitos",
2916 .cra_blocksize = SHA384_BLOCK_SIZE,
2917 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2918 CRYPTO_ALG_ASYNC,
2919 }
2920 },
2921 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2922 DESC_HDR_SEL0_MDEUB |
2923 DESC_HDR_MODE0_MDEUB_SHA384,
2924 },
2925 { .type = CRYPTO_ALG_TYPE_AHASH,
2926 .alg.hash = {
2927 .halg.digestsize = SHA512_DIGEST_SIZE,
2928 .halg.statesize = sizeof(struct talitos_export_state),
2929 .halg.base = {
2930 .cra_name = "hmac(sha512)",
2931 .cra_driver_name = "hmac-sha512-talitos",
2932 .cra_blocksize = SHA512_BLOCK_SIZE,
2933 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2934 CRYPTO_ALG_ASYNC,
2935 }
2936 },
2937 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2938 DESC_HDR_SEL0_MDEUB |
2939 DESC_HDR_MODE0_MDEUB_SHA512,
2940 }
2941 };
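/*
 * Illustrative sketch (not part of the driver): users select one of the
 * implementations above either by generic cra_name, letting priority
 * decide, or explicitly by cra_driver_name:
 */
#if 0
	struct crypto_ahash *tfm;

	/* by generic name: the highest-priority implementation wins */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	/* or pinned to this driver via its cra_driver_name */
	tfm = crypto_alloc_ahash("sha256-talitos", 0, 0);
#endif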
2942
2943 struct talitos_crypto_alg {
2944 struct list_head entry;
2945 struct device *dev;
2946 struct talitos_alg_template algt;
2947 };
2948
2949 static int talitos_init_common(struct talitos_ctx *ctx,
2950 struct talitos_crypto_alg *talitos_alg)
2951 {
2952 struct talitos_private *priv;
2953
2954 /* update context with ptr to dev */
2955 ctx->dev = talitos_alg->dev;
2956
2957 /* assign SEC channel to tfm in round-robin fashion */
2958 priv = dev_get_drvdata(ctx->dev);
2959 ctx->ch = atomic_inc_return(&priv->last_chan) &
2960 (priv->num_channels - 1);
2961
2962 /* copy descriptor header template value */
2963 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2964
2965 /* select done notification */
2966 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2967
2968 return 0;
2969 }
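/*
 * The round-robin mask above relies on talitos_probe() rejecting channel
 * counts that are not a power of two: with 4 channels, successive tfms
 * get channels (counter & 3) = 1, 2, 3, 0, 1, ...
 */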
2970
2971 static int talitos_cra_init(struct crypto_tfm *tfm)
2972 {
2973 struct crypto_alg *alg = tfm->__crt_alg;
2974 struct talitos_crypto_alg *talitos_alg;
2975 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2976
2977 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2978 talitos_alg = container_of(__crypto_ahash_alg(alg),
2979 struct talitos_crypto_alg,
2980 algt.alg.hash);
2981 else
2982 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2983 algt.alg.crypto);
2984
2985 return talitos_init_common(ctx, talitos_alg);
2986 }
2987
2988 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2989 {
2990 struct aead_alg *alg = crypto_aead_alg(tfm);
2991 struct talitos_crypto_alg *talitos_alg;
2992 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2993
2994 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2995 algt.alg.aead);
2996
2997 return talitos_init_common(ctx, talitos_alg);
2998 }
2999
3000 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3001 {
3002 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3003
3004 talitos_cra_init(tfm);
3005
3006 ctx->keylen = 0;
3007 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3008 sizeof(struct talitos_ahash_req_ctx));
3009
3010 return 0;
3011 }
3012
3013 static void talitos_cra_exit(struct crypto_tfm *tfm)
3014 {
3015 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3016 struct device *dev = ctx->dev;
3017
3018 if (ctx->keylen)
3019 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3020 }
3021
3022 /*
3023 * given the alg's descriptor header template, determine whether descriptor
3024 * type and primary/secondary execution units required match the hw
3025 * capabilities description provided in the device tree node.
3026 */
3027 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3028 {
3029 struct talitos_private *priv = dev_get_drvdata(dev);
3030 int ret;
3031
3032 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3033 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3034
3035 if (SECONDARY_EU(desc_hdr_template))
3036 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3037 & priv->exec_units);
3038
3039 return ret;
3040 }
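/*
 * Worked example (hypothetical field values): for a template whose
 * descriptor type field is 2 and whose primary EU field is 3, with no
 * secondary EU, hw_supports() reduces to ((1 << 2) & priv->desc_types)
 * && ((1 << 3) & priv->exec_units), both masks coming straight from the
 * device tree properties read at probe time.
 */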
3041
3042 static int talitos_remove(struct platform_device *ofdev)
3043 {
3044 struct device *dev = &ofdev->dev;
3045 struct talitos_private *priv = dev_get_drvdata(dev);
3046 struct talitos_crypto_alg *t_alg, *n;
3047 int i;
3048
3049 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3050 switch (t_alg->algt.type) {
3051 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3052 break;
3053 case CRYPTO_ALG_TYPE_AEAD:
3054 crypto_unregister_aead(&t_alg->algt.alg.aead);
break;
3055 case CRYPTO_ALG_TYPE_AHASH:
3056 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3057 break;
3058 }
3059 list_del(&t_alg->entry);
3060 }
3061
3062 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3063 talitos_unregister_rng(dev);
3064
3065 for (i = 0; i < 2; i++)
3066 if (priv->irq[i]) {
3067 free_irq(priv->irq[i], dev);
3068 irq_dispose_mapping(priv->irq[i]);
3069 }
3070
3071 tasklet_kill(&priv->done_task[0]);
3072 if (priv->irq[1])
3073 tasklet_kill(&priv->done_task[1]);
3074
3075 return 0;
3076 }
3077
3078 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3079 struct talitos_alg_template
3080 *template)
3081 {
3082 struct talitos_private *priv = dev_get_drvdata(dev);
3083 struct talitos_crypto_alg *t_alg;
3084 struct crypto_alg *alg;
3085
3086 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3087 GFP_KERNEL);
3088 if (!t_alg)
3089 return ERR_PTR(-ENOMEM);
3090
3091 t_alg->algt = *template;
3092
3093 switch (t_alg->algt.type) {
3094 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3095 alg = &t_alg->algt.alg.crypto;
3096 alg->cra_init = talitos_cra_init;
3097 alg->cra_exit = talitos_cra_exit;
3098 alg->cra_type = &crypto_ablkcipher_type;
3099 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3100 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3101 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3102 alg->cra_ablkcipher.geniv = "eseqiv";
3103 break;
3104 case CRYPTO_ALG_TYPE_AEAD:
3105 alg = &t_alg->algt.alg.aead.base;
3106 alg->cra_exit = talitos_cra_exit;
3107 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3108 t_alg->algt.alg.aead.setkey = aead_setkey;
3109 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3110 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3111 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3112 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3113 devm_kfree(dev, t_alg);
3114 return ERR_PTR(-ENOTSUPP);
3115 }
3116 break;
3117 case CRYPTO_ALG_TYPE_AHASH:
3118 alg = &t_alg->algt.alg.hash.halg.base;
3119 alg->cra_init = talitos_cra_init_ahash;
3120 alg->cra_exit = talitos_cra_exit;
3121 alg->cra_type = &crypto_ahash_type;
3122 t_alg->algt.alg.hash.init = ahash_init;
3123 t_alg->algt.alg.hash.update = ahash_update;
3124 t_alg->algt.alg.hash.final = ahash_final;
3125 t_alg->algt.alg.hash.finup = ahash_finup;
3126 t_alg->algt.alg.hash.digest = ahash_digest;
3127 if (!strncmp(alg->cra_name, "hmac", 4))
3128 t_alg->algt.alg.hash.setkey = ahash_setkey;
3129 t_alg->algt.alg.hash.import = ahash_import;
3130 t_alg->algt.alg.hash.export = ahash_export;
3131
3132 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3133 !strncmp(alg->cra_name, "hmac", 4)) {
3134 devm_kfree(dev, t_alg);
3135 return ERR_PTR(-ENOTSUPP);
3136 }
3137 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3138 (!strcmp(alg->cra_name, "sha224") ||
3139 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3140 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3141 t_alg->algt.desc_hdr_template =
3142 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3143 DESC_HDR_SEL0_MDEUA |
3144 DESC_HDR_MODE0_MDEU_SHA256;
3145 }
3146 break;
3147 default:
3148 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3149 devm_kfree(dev, t_alg);
3150 return ERR_PTR(-EINVAL);
3151 }
3152
3153 alg->cra_module = THIS_MODULE;
3154 if (t_alg->algt.priority)
3155 alg->cra_priority = t_alg->algt.priority;
3156 else
3157 alg->cra_priority = TALITOS_CRA_PRIORITY;
3158 alg->cra_alignmask = 0;
3159 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3160 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3161
3162 t_alg->dev = dev;
3163
3164 return t_alg;
3165 }
3166
3167 static int talitos_probe_irq(struct platform_device *ofdev)
3168 {
3169 struct device *dev = &ofdev->dev;
3170 struct device_node *np = ofdev->dev.of_node;
3171 struct talitos_private *priv = dev_get_drvdata(dev);
3172 int err;
3173 bool is_sec1 = has_ftr_sec1(priv);
3174
3175 priv->irq[0] = irq_of_parse_and_map(np, 0);
3176 if (!priv->irq[0]) {
3177 dev_err(dev, "failed to map irq\n");
3178 return -EINVAL;
3179 }
3180 if (is_sec1) {
3181 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3182 dev_driver_string(dev), dev);
3183 goto primary_out;
3184 }
3185
3186 priv->irq[1] = irq_of_parse_and_map(np, 1);
3187
3188 /* get the primary irq line */
3189 if (!priv->irq[1]) {
3190 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3191 dev_driver_string(dev), dev);
3192 goto primary_out;
3193 }
3194
3195 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3196 dev_driver_string(dev), dev);
3197 if (err)
3198 goto primary_out;
3199
3200 /* get the secondary irq line */
3201 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3202 dev_driver_string(dev), dev);
3203 if (err) {
3204 dev_err(dev, "failed to request secondary irq\n");
3205 irq_dispose_mapping(priv->irq[1]);
3206 priv->irq[1] = 0;
3207 }
3208
3209 return err;
3210
3211 primary_out:
3212 if (err) {
3213 dev_err(dev, "failed to request primary irq\n");
3214 irq_dispose_mapping(priv->irq[0]);
3215 priv->irq[0] = 0;
3216 }
3217
3218 return err;
3219 }
3220
3221 static int talitos_probe(struct platform_device *ofdev)
3222 {
3223 struct device *dev = &ofdev->dev;
3224 struct device_node *np = ofdev->dev.of_node;
3225 struct talitos_private *priv;
3226 int i, err;
3227 int stride;
3228 struct resource *res;
3229
3230 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3231 if (!priv)
3232 return -ENOMEM;
3233
3234 INIT_LIST_HEAD(&priv->alg_list);
3235
3236 dev_set_drvdata(dev, priv);
3237
3238 priv->ofdev = ofdev;
3239
3240 spin_lock_init(&priv->reg_lock);
3241
3242 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3243 if (!res)
3244 return -ENXIO;
3245 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3246 if (!priv->reg) {
3247 dev_err(dev, "failed to of_iomap\n");
3248 err = -ENOMEM;
3249 goto err_out;
3250 }
3251
3252 /* get SEC version capabilities from device tree */
3253 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3254 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3255 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3256 of_property_read_u32(np, "fsl,descriptor-types-mask",
3257 &priv->desc_types);
3258
3259 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3260 !priv->exec_units || !priv->desc_types) {
3261 dev_err(dev, "invalid property data in device tree node\n");
3262 err = -EINVAL;
3263 goto err_out;
3264 }
3265
3266 if (of_device_is_compatible(np, "fsl,sec3.0"))
3267 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3268
3269 if (of_device_is_compatible(np, "fsl,sec2.1"))
3270 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3271 TALITOS_FTR_SHA224_HWINIT |
3272 TALITOS_FTR_HMAC_OK;
3273
3274 if (of_device_is_compatible(np, "fsl,sec1.0"))
3275 priv->features |= TALITOS_FTR_SEC1;
3276
3277 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3278 priv->reg_deu = priv->reg + TALITOS12_DEU;
3279 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3280 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3281 stride = TALITOS1_CH_STRIDE;
3282 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3283 priv->reg_deu = priv->reg + TALITOS10_DEU;
3284 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3285 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3286 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3287 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3288 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3289 stride = TALITOS1_CH_STRIDE;
3290 } else {
3291 priv->reg_deu = priv->reg + TALITOS2_DEU;
3292 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3293 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3294 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3295 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3296 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3297 priv->reg_keu = priv->reg + TALITOS2_KEU;
3298 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3299 stride = TALITOS2_CH_STRIDE;
3300 }
3301
3302 err = talitos_probe_irq(ofdev);
3303 if (err)
3304 goto err_out;
3305
3306 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3307 if (priv->num_channels == 1)
3308 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3309 (unsigned long)dev);
3310 else
3311 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3312 (unsigned long)dev);
3313 } else {
3314 if (priv->irq[1]) {
3315 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3316 (unsigned long)dev);
3317 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3318 (unsigned long)dev);
3319 } else if (priv->num_channels == 1) {
3320 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3321 (unsigned long)dev);
3322 } else {
3323 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3324 (unsigned long)dev);
3325 }
3326 }
3327
3328 priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
3329 priv->num_channels, GFP_KERNEL);
3330 if (!priv->chan) {
3331 dev_err(dev, "failed to allocate channel management space\n");
3332 err = -ENOMEM;
3333 goto err_out;
3334 }
3335
3336 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3337
3338 for (i = 0; i < priv->num_channels; i++) {
3339 priv->chan[i].reg = priv->reg + stride * (i + 1);
3340 if (!priv->irq[1] || !(i & 1))
3341 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3342
3343 spin_lock_init(&priv->chan[i].head_lock);
3344 spin_lock_init(&priv->chan[i].tail_lock);
3345
3346 priv->chan[i].fifo = devm_kzalloc(dev,
3347 sizeof(struct talitos_request) *
3348 priv->fifo_len, GFP_KERNEL);
3349 if (!priv->chan[i].fifo) {
3350 dev_err(dev, "failed to allocate request fifo %d\n", i);
3351 err = -ENOMEM;
3352 goto err_out;
3353 }
3354
3355 atomic_set(&priv->chan[i].submit_count,
3356 -(priv->chfifo_len - 1));
3357 }
3358
3359 dma_set_mask(dev, DMA_BIT_MASK(36));
3360
3361 /* reset and initialize the h/w */
3362 err = init_device(dev);
3363 if (err) {
3364 dev_err(dev, "failed to initialize device\n");
3365 goto err_out;
3366 }
3367
3368 /* register the RNG, if available */
3369 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3370 err = talitos_register_rng(dev);
3371 if (err) {
3372 dev_err(dev, "failed to register hwrng: %d\n", err);
3373 goto err_out;
3374 } else
3375 dev_info(dev, "hwrng\n");
3376 }
3377
3378 /* register crypto algorithms the device supports */
3379 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3380 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3381 struct talitos_crypto_alg *t_alg;
3382 struct crypto_alg *alg = NULL;
3383
3384 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3385 if (IS_ERR(t_alg)) {
3386 err = PTR_ERR(t_alg);
3387 if (err == -ENOTSUPP)
3388 continue;
3389 goto err_out;
3390 }
3391
3392 switch (t_alg->algt.type) {
3393 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3394 err = crypto_register_alg(
3395 &t_alg->algt.alg.crypto);
3396 alg = &t_alg->algt.alg.crypto;
3397 break;
3398
3399 case CRYPTO_ALG_TYPE_AEAD:
3400 err = crypto_register_aead(
3401 &t_alg->algt.alg.aead);
3402 alg = &t_alg->algt.alg.aead.base;
3403 break;
3404
3405 case CRYPTO_ALG_TYPE_AHASH:
3406 err = crypto_register_ahash(
3407 &t_alg->algt.alg.hash);
3408 alg = &t_alg->algt.alg.hash.halg.base;
3409 break;
3410 }
3411 if (err) {
3412 dev_err(dev, "%s alg registration failed\n",
3413 alg->cra_driver_name);
3414 devm_kfree(dev, t_alg);
3415 } else
3416 list_add_tail(&t_alg->entry, &priv->alg_list);
3417 }
3418 }
3419 if (!list_empty(&priv->alg_list))
3420 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3421 (char *)of_get_property(np, "compatible", NULL));
3422
3423 return 0;
3424
3425 err_out:
3426 talitos_remove(ofdev);
3427
3428 return err;
3429 }
3430
3431 static const struct of_device_id talitos_match[] = {
3432 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3433 {
3434 .compatible = "fsl,sec1.0",
3435 },
3436 #endif
3437 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3438 {
3439 .compatible = "fsl,sec2.0",
3440 },
3441 #endif
3442 {},
3443 };
3444 MODULE_DEVICE_TABLE(of, talitos_match);
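/*
 * Illustrative device tree node (example values only; consult the
 * fsl-sec2 binding documentation for real hardware):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupt-parent = <&ipic>;
 *		interrupts = <11 0x8>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0x7e>;
 *		fsl,descriptor-types-mask = <0x01010ebf>;
 *	};
 */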
3445
3446 static struct platform_driver talitos_driver = {
3447 .driver = {
3448 .name = "talitos",
3449 .of_match_table = talitos_match,
3450 },
3451 .probe = talitos_probe,
3452 .remove = talitos_remove,
3453 };
3454
3455 module_platform_driver(talitos_driver);
3456
3457 MODULE_LICENSE("GPL");
3458 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3459 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");