]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - drivers/crypto/talitos.c
b7184f305867e130a363c761a7ef0f737fe2f8b7
[mirror_ubuntu-hirsute-kernel.git] / drivers / crypto / talitos.c
1 /*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
60 {
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
64 }
65
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72 }
73
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75 bool is_sec1)
76 {
77 if (is_sec1) {
78 ptr->res = 0;
79 ptr->len1 = cpu_to_be16(len);
80 } else {
81 ptr->len = cpu_to_be16(len);
82 }
83 }
84
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 bool is_sec1)
87 {
88 if (is_sec1)
89 return be16_to_cpu(ptr->len1);
90 else
91 return be16_to_cpu(ptr->len);
92 }
93
94 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
95 bool is_sec1)
96 {
97 if (!is_sec1)
98 ptr->j_extent = val;
99 }
100
101 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
102 {
103 if (!is_sec1)
104 ptr->j_extent |= val;
105 }
106
107 /*
108 * map virtual single (contiguous) pointer to h/w descriptor pointer
109 */
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 *
 * DMA-maps @len bytes at @data in direction @dir and fills @ptr with the
 * resulting bus address, the length, and a zero extent byte (SEC2+ only).
 * The matching unmap is unmap_single_talitos_ptr().
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}
123
124 /*
125 * unmap bus single (contiguous) h/w descriptor pointer
126 */
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 *
 * Reverses map_single_talitos_ptr() using the address and length stored
 * in the descriptor pointer itself.
 * NOTE(review): only the low 32 bits (ptr->ptr) are passed to
 * dma_unmap_single(); the eptr extension byte is not folded back in —
 * presumably fine for the platforms this runs on, but worth confirming
 * for >32-bit DMA addresses.
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
137
/*
 * reset_channel() - soft-reset one SEC channel and re-arm its config
 * @dev: SEC device
 * @ch:  channel index
 *
 * Sets the channel-reset bit (located in CCCR_LO on SEC1, CCCR on SEC2+)
 * and busy-waits up to TALITOS_TIMEOUT iterations for the hardware to
 * clear it, then re-enables 36-bit addressing, done writeback and the
 * done IRQ, plus ICCR writeback when the h/w auth-check feature exists.
 *
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* SEC1: reset bit lives in CCCR_LO */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		/* SEC2+: reset bit lives in CCCR */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
176
/*
 * reset_device() - global software reset of the SEC block
 *
 * Writes the software-reset bit (SEC1 and SEC2 use different MCR bits)
 * and polls until the hardware clears it or TALITOS_TIMEOUT expires.
 * When a second IRQ line is wired up, channels are then re-routed so
 * channels 1 and 3 raise interrupts on that line (MCR RCA bits).
 *
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* route channel interrupts across both IRQ lines, if present */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
202
203 /*
204 * Reset and initialize the device
205 */
/*
 * Reset and initialize the device
 *
 * Performs the double master reset required by the errata, resets every
 * channel, then programs the interrupt masks: done + error interrupts
 * are enabled globally (note the inverted IMR polarity on SEC1, where
 * enabling means *clearing* mask bits).  Integrity-check error IRQs are
 * disabled in favour of the ICCR writeback path when the h/w supports it.
 *
 * Returns 0 on success or the error from reset_device()/reset_channel().
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		/* SEC1 IMR: a set bit masks — clear bits to enable */
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
251
252 /**
253 * talitos_submit - submits a descriptor to the device for processing
254 * @dev: the SEC device to be used
255 * @ch: the SEC device channel to be used
256 * @desc: the descriptor to be processed by the device
257 * @callback: whom to call when processing is complete
258 * @context: a handle for use by caller (optional)
259 *
260 * desc must contain valid dma-mapped (bus physical) address pointers.
261 * callback must check err and feedback in descriptor header
262 * for device processing status.
263 */
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS on successful submission (completion is reported
 * through @callback) or -EAGAIN when the channel's h/w fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the fifo-full gate: 0 means no room */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/*
		 * SEC1 descriptors start at hdr1 (hdr is kept as the
		 * s/w copy) and have no chained descriptor.
		 */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/*
	 * publish the slot: request->desc != NULL is what flush_channel()
	 * uses to recognise an in-flight entry, so it is written last
	 */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
320
321 /*
322 * process what was done, notify callback of error if not
323 */
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from the tail, completing every descriptor whose
 * DONE bit is set (status 0).  If @error is non-zero, descriptors without
 * DONE also complete — with @error — otherwise the walk stops at the first
 * unfinished one.  Callbacks run with the tail lock dropped, so the loop
 * re-reads the tail after each completion.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		/* free the fifo slot for talitos_submit() */
		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
383
384 /*
385 * process completed requests for channels that have done status
386 */
/*
 * Tasklet body for SEC1 "channel done" handling.  SEC1's ISR places the
 * per-channel done bits at 0x10000000 / 0x40000000 / 0x00010000 /
 * 0x00040000 (ch 0..3), hence the literal masks.  On SEC1 a *set* IMR
 * bit masks an interrupt, so done interrupts — masked by the ISR — are
 * re-enabled here with clrbits32() (compare DEF_TALITOS2_DONE, which
 * uses setbits32()).
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415
/*
 * Tasklet body for SEC2+ "channel done" handling.  SEC2 done bits sit at
 * even positions 0/2/4/6 for channels 0..3.  IMR polarity is the normal
 * one here: setbits32() re-enables the done interrupts that the ISR
 * masked before scheduling this tasklet.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
446
447 /*
448 * locate current (offending) descriptor
449 */
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current-descriptor pointer (CDPR, 64-bit across
 * two registers) and scans the s/w fifo from the tail for the request
 * whose mapped descriptor address matches, returning its header word.
 * Returns 0 when CDPR is NULL or no fifo entry matches.
 *
 * NOTE(review): if the channel already advanced CDPR past the faulting
 * descriptor, the address may not match any fifo entry and the search
 * gives up — callers must tolerate the 0 return (report_eu_error does).
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
477
478 /*
479 * user diagnostics; report root cause of error based on execution unit status
480 */
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution-unit fields
 * of the offending descriptor header and dumps that unit's interrupt
 * status registers, followed by the channel's descriptor buffer.
 * A zero @desc_hdr falls back to the header in the channel DESCBUF.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/*
		 * NOTE(review): the KEU case reads the PKEU status
		 * registers (reg_pkeu) — looks like a copy/paste from the
		 * PKEU case above; verify against the register map whether
		 * a dedicated KEU base should be used here.
		 */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the eight 64-bit words of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
552
553 /*
554 * recover from error interrupts
555 */
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode and log the channel pending
 * status (CCPSR_LO), flush the channel's fifo with an error status, then
 * either reset the channel (SEC1, or fatal errors) or ask SEC2 h/w to
 * continue with the next descriptor.  Unrecoverable conditions (restart
 * timeout, non-channel error bits, anything in @isr_lo) fall through to
 * a full device re-init.
 *
 * NOTE(review): @timeout is initialised once for the whole function, so
 * a second channel needing the continuation poll inherits whatever the
 * first one left — confirm whether per-channel reset was intended.
 * Likewise @reset_ch, once set by a DOF error, stays set for the
 * remaining channels of this pass.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			/* SEC2: error bits are the odd bits, 2*ch+1 */
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2 continuation: resume after the bad descriptor */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
651
/*
 * Hard-IRQ handler for SEC1.  Reads and acknowledges ISR/ISR_LO under
 * the register lock, dispatches errors to talitos_error() (with the lock
 * dropped), and on completions masks further done interrupts before
 * scheduling the done tasklet — which unmasks them again on exit.
 * Note the SEC1 IMR polarity: setbits32() here *masks*.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
686
/*
 * Hard-IRQ handler for SEC2+.  Same flow as the SEC1 variant, but with
 * normal IMR polarity (clrbits32() masks done interrupts) and an
 * unconditional error path whenever ISR_LO is non-zero.  Instantiated
 * once for single-IRQ parts (4ch) and twice for dual-IRQ parts, where
 * channels 0/2 and 1/3 are routed to separate lines and tasklets.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
725
726 /*
727 * hwrng
728 */
729 static int talitos_rng_data_present(struct hwrng *rng, int wait)
730 {
731 struct device *dev = (struct device *)rng->priv;
732 struct talitos_private *priv = dev_get_drvdata(dev);
733 u32 ofl;
734 int i;
735
736 for (i = 0; i < 20; i++) {
737 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
738 TALITOS_RNGUSR_LO_OFL;
739 if (ofl || !wait)
740 break;
741 udelay(10);
742 }
743
744 return !!ofl;
745 }
746
/*
 * hwrng .data_read callback: pull one 32-bit word of entropy.
 * Both halves of the fifo entry are read because the h/w requires full
 * 64-bit accesses; only the second (low) word is returned to the caller.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
758
/*
 * hwrng .init callback: software-reset the RNGU and wait for it to
 * report reset-done, then kick off generation by writing the data-size
 * register.  Returns 0 on success, -ENODEV if the reset never completes.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
780
781 static int talitos_register_rng(struct device *dev)
782 {
783 struct talitos_private *priv = dev_get_drvdata(dev);
784 int err;
785
786 priv->rng.name = dev_driver_string(dev),
787 priv->rng.init = talitos_rng_init,
788 priv->rng.data_present = talitos_rng_data_present,
789 priv->rng.data_read = talitos_rng_data_read,
790 priv->rng.priv = (unsigned long)dev;
791
792 err = hwrng_register(&priv->rng);
793 if (!err)
794 priv->rng_registered = true;
795
796 return err;
797 }
798
799 static void talitos_unregister_rng(struct device *dev)
800 {
801 struct talitos_private *priv = dev_get_drvdata(dev);
802
803 if (!priv->rng_registered)
804 return;
805
806 hwrng_unregister(&priv->rng);
807 priv->rng_registered = false;
808 }
809
810 /*
811 * crypto alg
812 */
813 #define TALITOS_CRA_PRIORITY 3000
814 /*
815 * Defines a priority for doing AEAD with descriptors type
816 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
817 */
818 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
819 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
820 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
821
/*
 * Per-transform (tfm) context: which device/channel services requests,
 * the descriptor-header template selecting EUs and modes, and the key
 * material laid out as the descriptors expect (auth key then enc key,
 * see aead_setkey()).
 */
struct talitos_ctx {
	struct device *dev;		/* SEC device bound to this tfm */
	int ch;				/* channel requests go to */
	__be32 desc_hdr_template;	/* base descriptor header bits */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key || enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total bytes valid in key[] */
	unsigned int enckeylen;		/* cipher-key portion length */
	unsigned int authkeylen;	/* auth-key portion length */
};
832
833 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
834 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
835
/*
 * Per-request ahash state carried between .update/.final calls.
 * hw_context holds the intermediate digest state exchanged with the
 * MDEU; buf/bufnext accumulate data that does not yet fill a block.
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;	/* bytes of hw_context in use */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* buffered partial-block data */
	u8 bufnext[HASH_MAX_BLOCK_SIZE]; /* data deferred to next pass */
	unsigned int swinit;		/* context seeded by software */
	unsigned int first;		/* first descriptor of the hash */
	unsigned int last;		/* final descriptor (apply padding) */
	unsigned int to_hash_later;	/* byte count deferred to bufnext */
	unsigned int nbuf;		/* valid bytes in buf[] */
	struct scatterlist bufsl[2];	/* s/g chaining buf + caller data */
	struct scatterlist *psrc;	/* source list used this pass */
};
849
/*
 * Serialized hash state for the ahash .export/.import interface —
 * mirrors the resumable subset of struct talitos_ahash_req_ctx.
 */
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* buffered partial-block data */
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;		/* valid bytes in buf[] */
};
859
860 static int aead_setkey(struct crypto_aead *authenc,
861 const u8 *key, unsigned int keylen)
862 {
863 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
864 struct crypto_authenc_keys keys;
865
866 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
867 goto badkey;
868
869 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
870 goto badkey;
871
872 memcpy(ctx->key, keys.authkey, keys.authkeylen);
873 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
874
875 ctx->keylen = keys.authkeylen + keys.enckeylen;
876 ctx->enckeylen = keys.enckeylen;
877 ctx->authkeylen = keys.authkeylen;
878
879 return 0;
880
881 badkey:
882 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
883 return -EINVAL;
884 }
885
886 /*
887 * talitos_edesc - s/w-extended descriptor
888 * @src_nents: number of segments in input scatterlist
889 * @dst_nents: number of segments in output scatterlist
890 * @icv_ool: whether ICV is out-of-line
891 * @iv_dma: dma address of iv for checking continuity and link table
892 * @dma_len: length of dma mapped link_tbl space
893 * @dma_link_tbl: bus physical address of link_tbl/buf
894 * @desc: h/w descriptor
895 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
896 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
897 *
898 * if decrypting (with authcheck), or either one of src_nents or dst_nents
899 * is greater than 1, an integrity check value is concatenated to the end
900 * of link_tbl data
901 */
/* s/w-extended descriptor — see the kernel-doc block above for details */
struct talitos_edesc {
	int src_nents;			/* segments in input s/g list */
	int dst_nents;			/* segments in output s/g list */
	bool icv_ool;			/* ICV stored out-of-line */
	dma_addr_t iv_dma;
	int dma_len;			/* mapped size of link_tbl/buf */
	dma_addr_t dma_link_tbl;	/* bus address of link_tbl/buf */
	struct talitos_desc desc;	/* the h/w descriptor itself */
	union {
		struct talitos_ptr link_tbl[0];	/* SEC2+: h/w link tables */
		u8 buf[0];			/* SEC1: bounce buffer */
	};
};
915
/*
 * talitos_sg_unmap() - undo the DMA mapping of a request's s/g lists
 *
 * On SEC1 with a multi-segment destination, data was processed through
 * the bounce buffer, so it is synced back and copied out to @dst first.
 * Lists mapped via dma_map_sg() (single-segment, or any list on SEC2+)
 * are then unmapped; in-place requests (src == dst) were mapped
 * bidirectionally and are unmapped once.
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		/* results live in the bounce buffer; copy them to dst */
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
943
/*
 * ipsec_esp_unmap() - release all DMA resources of a completed AEAD request
 *
 * Unmaps the single-mapped descriptor pointers (ICV only for true
 * IPSEC_ESP descriptors — ptr[6] is absent in HSNA mode), the src/dst
 * scatterlists and the link-table allocation.  For HSNA-type descriptors
 * the next IV is also salvaged from the end of the ciphertext into
 * ctx->iv.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		/* HSNA mode: keep the last cipher block as the next IV */
		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}
973
974 /*
975 * ipsec_esp descriptor callbacks
976 */
/*
 * ipsec_esp descriptor callbacks
 */
/*
 * Completion callback for AEAD encryption: unmap the request and, when
 * the hardware wrote the ICV out-of-line (into the SEC1 bounce buffer or
 * past the SEC2 link tables), copy it to the tail of the destination
 * scatterlist before completing the request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			/* SEC1: ICV follows assoc + crypt data in buf */
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			/* SEC2+: ICV stored after both link tables */
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
1010
/*
 * Completion callback for AEAD decryption when the ICV must be verified in
 * software: compares the ICV generated by the hardware against the ICV that
 * was stashed from the incoming message (see aead_decrypt()) and reports
 * -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;	/* oicv: hw-generated ICV, icv: received ICV */
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		/* received ICV sits at the end of the dst scatterlist */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			/* hw ICV lives in the edesc scratch area: after the
			 * data on SEC1, after the link tables on SEC2+ */
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			/* out-of-line case: stashed ICV follows the hw one */
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		/* constant-time compare to avoid timing side channels */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1053
1054 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1055 struct talitos_desc *desc,
1056 void *context, int err)
1057 {
1058 struct aead_request *req = context;
1059 struct talitos_edesc *edesc;
1060
1061 edesc = container_of(desc, struct talitos_edesc, desc);
1062
1063 ipsec_esp_unmap(dev, edesc, req);
1064
1065 /* check ICV auth status */
1066 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1067 DESC_HDR_LO_ICCR1_PASS))
1068 err = -EBADMSG;
1069
1070 kfree(edesc);
1071
1072 aead_request_complete(req, err);
1073 }
1074
1075 /*
1076 * convert scatterlist to SEC h/w link table format
1077 * stop at cryptlen bytes
1078 */
1079 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1080 unsigned int offset, int cryptlen,
1081 struct talitos_ptr *link_tbl_ptr)
1082 {
1083 int n_sg = sg_count;
1084 int count = 0;
1085
1086 while (cryptlen && sg && n_sg--) {
1087 unsigned int len = sg_dma_len(sg);
1088
1089 if (offset >= len) {
1090 offset -= len;
1091 goto next;
1092 }
1093
1094 len -= offset;
1095
1096 if (len > cryptlen)
1097 len = cryptlen;
1098
1099 to_talitos_ptr(link_tbl_ptr + count,
1100 sg_dma_address(sg) + offset, 0);
1101 to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1102 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1103 count++;
1104 cryptlen -= len;
1105 offset = 0;
1106
1107 next:
1108 sg = sg_next(sg);
1109 }
1110
1111 /* tag end of link table */
1112 if (count > 0)
1113 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1114 DESC_PTR_LNKTBL_RETURN, 0);
1115
1116 return count;
1117 }
1118
/*
 * Point the descriptor entry 'ptr' at 'len' bytes of 'src'.
 *
 * Uses the scatterlist directly when it is a single segment, the flat
 * bounce buffer on SEC1, or builds a link table starting at 'tbl_off'
 * otherwise.  Returns the number of link table entries used (>1 means the
 * caller must sync the link table to the device before submit).
 */
int talitos_sg_map(struct device *dev, struct scatterlist *src,
		   unsigned int len, struct talitos_edesc *edesc,
		   struct talitos_ptr *ptr,
		   int sg_count, unsigned int offset, int tbl_off)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);

	/* single segment: point straight at the mapped data */
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
		return sg_count;
	}
	/* SEC1 cannot use link tables: data was copied to the flat buffer */
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed*/
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	/* point the descriptor at the link table and flag indirection */
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
1151
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Descriptor pointer layout (IPSEC_ESP type; the non-ESP AEAD type swaps
 * ptr[2]/ptr[3] and uses ptr[6] for the ICV instead of the IV):
 *   ptr[0] hmac key, ptr[1] hmac (assoc) data, ptr[2] cipher iv,
 *   ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out, ptr[6] iv out.
 * On error from talitos_submit() the edesc is unmapped and freed here;
 * otherwise ownership passes to the completion callback.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;	/* next free entry in the shared link table */
	int sg_count, ret;
	int sg_link_tbl_len;
	bool sync_needed = false;	/* link table touched on the CPU side */
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* SEC1 with multiple segments: bounce the source into the flat
	 * buffer; otherwise DMA-map the source scatterlist */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
	} else {
		to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
	}

	/* cipher key (stored after the auth key in ctx->key) */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);

	sg_link_tbl_len = cryptlen;

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);

		/* decrypt-with-hw-check also reads the incoming ICV */
		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len += authsize;
	}

	ret = talitos_sg_map(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
			     sg_count, areq->assoclen, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		/* dst needed a link table: the ICV goes out-of-line into the
		 * edesc scratch area and is copied back on completion */
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);
			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
			to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
		}
	} else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		/* non-ESP type: ICV is written directly after the payload */
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	/* flush CPU-written link table entries before the device reads them */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1320
1321 /*
1322 * allocate and map the extended descriptor
1323 */
1324 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1325 struct scatterlist *src,
1326 struct scatterlist *dst,
1327 u8 *iv,
1328 unsigned int assoclen,
1329 unsigned int cryptlen,
1330 unsigned int authsize,
1331 unsigned int ivsize,
1332 int icv_stashing,
1333 u32 cryptoflags,
1334 bool encrypt)
1335 {
1336 struct talitos_edesc *edesc;
1337 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1338 dma_addr_t iv_dma = 0;
1339 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1340 GFP_ATOMIC;
1341 struct talitos_private *priv = dev_get_drvdata(dev);
1342 bool is_sec1 = has_ftr_sec1(priv);
1343 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1344 void *err;
1345
1346 if (cryptlen + authsize > max_len) {
1347 dev_err(dev, "length exceeds h/w max limit\n");
1348 return ERR_PTR(-EINVAL);
1349 }
1350
1351 if (ivsize)
1352 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1353
1354 if (!dst || dst == src) {
1355 src_len = assoclen + cryptlen + authsize;
1356 src_nents = sg_nents_for_len(src, src_len);
1357 if (src_nents < 0) {
1358 dev_err(dev, "Invalid number of src SG.\n");
1359 err = ERR_PTR(-EINVAL);
1360 goto error_sg;
1361 }
1362 src_nents = (src_nents == 1) ? 0 : src_nents;
1363 dst_nents = dst ? src_nents : 0;
1364 dst_len = 0;
1365 } else { /* dst && dst != src*/
1366 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1367 src_nents = sg_nents_for_len(src, src_len);
1368 if (src_nents < 0) {
1369 dev_err(dev, "Invalid number of src SG.\n");
1370 err = ERR_PTR(-EINVAL);
1371 goto error_sg;
1372 }
1373 src_nents = (src_nents == 1) ? 0 : src_nents;
1374 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1375 dst_nents = sg_nents_for_len(dst, dst_len);
1376 if (dst_nents < 0) {
1377 dev_err(dev, "Invalid number of dst SG.\n");
1378 err = ERR_PTR(-EINVAL);
1379 goto error_sg;
1380 }
1381 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1382 }
1383
1384 /*
1385 * allocate space for base edesc plus the link tables,
1386 * allowing for two separate entries for AD and generated ICV (+ 2),
1387 * and space for two sets of ICVs (stashed and generated)
1388 */
1389 alloc_len = sizeof(struct talitos_edesc);
1390 if (src_nents || dst_nents) {
1391 if (is_sec1)
1392 dma_len = (src_nents ? src_len : 0) +
1393 (dst_nents ? dst_len : 0);
1394 else
1395 dma_len = (src_nents + dst_nents + 2) *
1396 sizeof(struct talitos_ptr) + authsize * 2;
1397 alloc_len += dma_len;
1398 } else {
1399 dma_len = 0;
1400 alloc_len += icv_stashing ? authsize : 0;
1401 }
1402
1403 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1404 if (!edesc) {
1405 dev_err(dev, "could not allocate edescriptor\n");
1406 err = ERR_PTR(-ENOMEM);
1407 goto error_sg;
1408 }
1409
1410 edesc->src_nents = src_nents;
1411 edesc->dst_nents = dst_nents;
1412 edesc->iv_dma = iv_dma;
1413 edesc->dma_len = dma_len;
1414 if (dma_len)
1415 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1416 edesc->dma_len,
1417 DMA_BIDIRECTIONAL);
1418
1419 return edesc;
1420 error_sg:
1421 if (iv_dma)
1422 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1423 return err;
1424 }
1425
1426 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1427 int icv_stashing, bool encrypt)
1428 {
1429 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1430 unsigned int authsize = crypto_aead_authsize(authenc);
1431 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1432 unsigned int ivsize = crypto_aead_ivsize(authenc);
1433
1434 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1435 iv, areq->assoclen, areq->cryptlen,
1436 authsize, ivsize, icv_stashing,
1437 areq->base.flags, encrypt);
1438 }
1439
1440 static int aead_encrypt(struct aead_request *req)
1441 {
1442 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1443 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1444 struct talitos_edesc *edesc;
1445
1446 /* allocate extended descriptor */
1447 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1448 if (IS_ERR(edesc))
1449 return PTR_ERR(edesc);
1450
1451 /* set encrypt */
1452 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1453
1454 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1455 }
1456
/*
 * AEAD decrypt entry point.
 *
 * Prefers the hardware ICV check when the device supports it and the
 * request layout allows; otherwise stashes the incoming ICV in the edesc
 * scratch area so ipsec_esp_decrypt_swauth_done() can compare it against
 * the ICV the hardware generates.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the trailing ICV is not part of the payload to decrypt */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	/* the received ICV sits at the very end of src */
	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1505
1506 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1507 const u8 *key, unsigned int keylen)
1508 {
1509 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1510 u32 tmp[DES_EXPKEY_WORDS];
1511
1512 if (keylen > TALITOS_MAX_KEY_SIZE) {
1513 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1514 return -EINVAL;
1515 }
1516
1517 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1518 CRYPTO_TFM_REQ_WEAK_KEY) &&
1519 !des_ekey(tmp, key)) {
1520 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1521 return -EINVAL;
1522 }
1523
1524 memcpy(&ctx->key, key, keylen);
1525 ctx->keylen = keylen;
1526
1527 return 0;
1528 }
1529
/*
 * Undo all DMA mappings made for an ablkcipher request: iv out (ptr[5]),
 * the src/dst data, the cipher key (ptr[2]), the iv in (ptr[1]) and the
 * link-table/bounce area.
 */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1544
1545 static void ablkcipher_done(struct device *dev,
1546 struct talitos_desc *desc, void *context,
1547 int err)
1548 {
1549 struct ablkcipher_request *areq = context;
1550 struct talitos_edesc *edesc;
1551
1552 edesc = container_of(desc, struct talitos_edesc, desc);
1553
1554 common_nonsnoop_unmap(dev, edesc, areq);
1555
1556 kfree(edesc);
1557
1558 areq->base.complete(&areq->base, err);
1559 }
1560
/*
 * Fill in and submit an ablkcipher descriptor.
 *
 * Pointer layout: ptr[0] unused, ptr[1] iv in, ptr[2] key, ptr[3] data in,
 * ptr[4] data out, ptr[5] iv out, ptr[6] unused.  On submit failure the
 * edesc is unmapped and freed here; otherwise the callback owns it.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/* SEC1 multi-segment: bounce src into the flat buffer; otherwise
	 * DMA-map the scatterlist */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* dst link table starts after the src entries (+1 for the AD slot) */
	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* flush CPU-written link table entries to the device */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1636
1637 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1638 areq, bool encrypt)
1639 {
1640 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1641 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1642 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1643
1644 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1645 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1646 areq->base.flags, encrypt);
1647 }
1648
1649 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1650 {
1651 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1652 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1653 struct talitos_edesc *edesc;
1654
1655 /* allocate extended descriptor */
1656 edesc = ablkcipher_edesc_alloc(areq, true);
1657 if (IS_ERR(edesc))
1658 return PTR_ERR(edesc);
1659
1660 /* set encrypt */
1661 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1662
1663 return common_nonsnoop(edesc, areq, ablkcipher_done);
1664 }
1665
1666 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1667 {
1668 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1669 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1670 struct talitos_edesc *edesc;
1671
1672 /* allocate extended descriptor */
1673 edesc = ablkcipher_edesc_alloc(areq, false);
1674 if (IS_ERR(edesc))
1675 return PTR_ERR(edesc);
1676
1677 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1678
1679 return common_nonsnoop(edesc, areq, ablkcipher_done);
1680 }
1681
/*
 * Undo all DMA mappings made for an ahash request: the digest/context out
 * (ptr[5]), the source data, and — when they were mapped — the hash
 * context in (ptr[1]) and the HMAC key (ptr[2]).
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* ptr[2] is only mapped for HMAC (ctx->keylen != 0) */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1708
1709 static void ahash_done(struct device *dev,
1710 struct talitos_desc *desc, void *context,
1711 int err)
1712 {
1713 struct ahash_request *areq = context;
1714 struct talitos_edesc *edesc =
1715 container_of(desc, struct talitos_edesc, desc);
1716 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1717
1718 if (!req_ctx->last && req_ctx->to_hash_later) {
1719 /* Position any partial block for next update/final/finup */
1720 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1721 req_ctx->nbuf = req_ctx->to_hash_later;
1722 }
1723 common_nonsnoop_hash_unmap(dev, edesc, areq);
1724
1725 kfree(edesc);
1726
1727 areq->base.complete(&areq->base, err);
1728 }
1729
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	/* a pre-padded empty message: the 0x80 terminator bit followed by
	 * zeros, i.e. what MD5/SHA padding of a 0-byte input looks like */
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	/* we padded by hand, so tell the hardware not to pad again */
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1750
/*
 * Fill in and submit an ahash descriptor.
 *
 * Pointer layout: ptr[0] unused, ptr[1] hash context in (unless this is a
 * hardware-initialized first pass), ptr[2] HMAC key (if any), ptr[3] data
 * in, ptr[4] unused, ptr[5] digest out (last) or context out (middle),
 * ptr[6] unused.  On submit failure the edesc is unmapped and freed here.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/* SEC1 multi-segment: bounce the data into the flat buffer */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 cannot hash an empty message: substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1835
1836 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1837 unsigned int nbytes)
1838 {
1839 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1840 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1841 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1842
1843 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1844 nbytes, 0, 0, 0, areq->base.flags, false);
1845 }
1846
1847 static int ahash_init(struct ahash_request *areq)
1848 {
1849 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1850 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1851
1852 /* Initialize the context */
1853 req_ctx->nbuf = 0;
1854 req_ctx->first = 1; /* first indicates h/w must init its context */
1855 req_ctx->swinit = 0; /* assume h/w init of context */
1856 req_ctx->hw_context_size =
1857 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1858 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1859 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1860
1861 return 0;
1862 }
1863
1864 /*
1865 * on h/w without explicit sha224 support, we initialize h/w context
1866 * manually with sha224 constants, and tell it to run sha256.
1867 */
1868 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1869 {
1870 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1871
1872 ahash_init(areq);
1873 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1874
1875 req_ctx->hw_context[0] = SHA224_H0;
1876 req_ctx->hw_context[1] = SHA224_H1;
1877 req_ctx->hw_context[2] = SHA224_H2;
1878 req_ctx->hw_context[3] = SHA224_H3;
1879 req_ctx->hw_context[4] = SHA224_H4;
1880 req_ctx->hw_context[5] = SHA224_H5;
1881 req_ctx->hw_context[6] = SHA224_H6;
1882 req_ctx->hw_context[7] = SHA224_H7;
1883
1884 /* init 64-bit count */
1885 req_ctx->hw_context[8] = 0;
1886 req_ctx->hw_context[9] = 0;
1887
1888 return 0;
1889 }
1890
/*
 * Core of update/final/finup/digest.
 *
 * Buffers input until at least a full block is available, hashes whole
 * blocks (keeping one block back on non-final passes so the hardware can
 * pad on the last one), stashes any trailing partial block in bufnext,
 * then builds and submits the descriptor.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* stash the deferred tail; ahash_done() moves it into buf */
	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1982
1983 static int ahash_update(struct ahash_request *areq)
1984 {
1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1986
1987 req_ctx->last = 0;
1988
1989 return ahash_process_req(areq, areq->nbytes);
1990 }
1991
1992 static int ahash_final(struct ahash_request *areq)
1993 {
1994 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1995
1996 req_ctx->last = 1;
1997
1998 return ahash_process_req(areq, 0);
1999 }
2000
2001 static int ahash_finup(struct ahash_request *areq)
2002 {
2003 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2004
2005 req_ctx->last = 1;
2006
2007 return ahash_process_req(areq, areq->nbytes);
2008 }
2009
2010 static int ahash_digest(struct ahash_request *areq)
2011 {
2012 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2013 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2014
2015 ahash->init(areq);
2016 req_ctx->last = 1;
2017
2018 return ahash_process_req(areq, areq->nbytes);
2019 }
2020
2021 static int ahash_export(struct ahash_request *areq, void *out)
2022 {
2023 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2024 struct talitos_export_state *export = out;
2025
2026 memcpy(export->hw_context, req_ctx->hw_context,
2027 req_ctx->hw_context_size);
2028 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2029 export->swinit = req_ctx->swinit;
2030 export->first = req_ctx->first;
2031 export->last = req_ctx->last;
2032 export->to_hash_later = req_ctx->to_hash_later;
2033 export->nbuf = req_ctx->nbuf;
2034
2035 return 0;
2036 }
2037
2038 static int ahash_import(struct ahash_request *areq, const void *in)
2039 {
2040 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2041 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2042 const struct talitos_export_state *export = in;
2043
2044 memset(req_ctx, 0, sizeof(*req_ctx));
2045 req_ctx->hw_context_size =
2046 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2047 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2048 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2049 memcpy(req_ctx->hw_context, export->hw_context,
2050 req_ctx->hw_context_size);
2051 memcpy(req_ctx->buf, export->buf, export->nbuf);
2052 req_ctx->swinit = export->swinit;
2053 req_ctx->first = export->first;
2054 req_ctx->last = export->last;
2055 req_ctx->to_hash_later = export->to_hash_later;
2056 req_ctx->nbuf = export->nbuf;
2057
2058 return 0;
2059 }
2060
/* Completion bookkeeping for the synchronous key digest in keyhash(). */
struct keyhash_result {
	struct completion completion;
	int err;	/* final status recorded by keyhash_complete() */
};
2065
2066 static void keyhash_complete(struct crypto_async_request *req, int err)
2067 {
2068 struct keyhash_result *res = req->data;
2069
2070 if (err == -EINPROGRESS)
2071 return;
2072
2073 res->err = err;
2074 complete(&res->completion);
2075 }
2076
2077 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2078 u8 *hash)
2079 {
2080 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2081
2082 struct scatterlist sg[1];
2083 struct ahash_request *req;
2084 struct keyhash_result hresult;
2085 int ret;
2086
2087 init_completion(&hresult.completion);
2088
2089 req = ahash_request_alloc(tfm, GFP_KERNEL);
2090 if (!req)
2091 return -ENOMEM;
2092
2093 /* Keep tfm keylen == 0 during hash of the long key */
2094 ctx->keylen = 0;
2095 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2096 keyhash_complete, &hresult);
2097
2098 sg_init_one(&sg[0], key, keylen);
2099
2100 ahash_request_set_crypt(req, sg, hash, keylen);
2101 ret = crypto_ahash_digest(req);
2102 switch (ret) {
2103 case 0:
2104 break;
2105 case -EINPROGRESS:
2106 case -EBUSY:
2107 ret = wait_for_completion_interruptible(
2108 &hresult.completion);
2109 if (!ret)
2110 ret = hresult.err;
2111 break;
2112 default:
2113 break;
2114 }
2115 ahash_request_free(req);
2116
2117 return ret;
2118 }
2119
2120 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2121 unsigned int keylen)
2122 {
2123 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2124 unsigned int blocksize =
2125 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2126 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2127 unsigned int keysize = keylen;
2128 u8 hash[SHA512_DIGEST_SIZE];
2129 int ret;
2130
2131 if (keylen <= blocksize)
2132 memcpy(ctx->key, key, keysize);
2133 else {
2134 /* Must get the hash of the long key */
2135 ret = keyhash(tfm, key, keylen, hash);
2136
2137 if (ret) {
2138 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2139 return -EINVAL;
2140 }
2141
2142 keysize = digestsize;
2143 memcpy(ctx->key, hash, digestsize);
2144 }
2145
2146 ctx->keylen = keysize;
2147
2148 return 0;
2149 }
2150
2151
/*
 * Template describing one algorithm this driver may register.  The alg
 * union holds exactly one flavour, selected by @type; @desc_hdr_template
 * is the SEC descriptor header (type + execution units + mode bits) used
 * for requests on that algorithm.
 */
struct talitos_alg_template {
	u32 type;		/* CRYPTO_ALG_TYPE_{ABLKCIPHER,AEAD,AHASH} */
	u32 priority;		/* 0 means "use TALITOS_CRA_PRIORITY" */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
2162
/*
 * Table of all algorithms the driver can offer.  talitos_probe() walks it
 * and registers each entry whose descriptor type and execution units are
 * supported by the hardware (see hw_supports()).  Several AEAD algorithms
 * appear twice: once using the single-pass IPSEC_ESP descriptor and once
 * using the HMAC_SNOOP_NO_AFEU descriptor registered at a lower priority
 * (TALITOS_CRA_PRIORITY_AEAD_HSNA).
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	/* SHA384/SHA512 use MDEU-B; no HMAC_SNOOP_NO_AFEU variants below. */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUB |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_AESU |
		                     DESC_HDR_MODE0_AESU_CBC |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES |
		                     DESC_HDR_SEL1_MDEUA |
		                     DESC_HDR_MODE1_MDEU_INIT |
		                     DESC_HDR_MODE1_MDEU_PAD |
		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			             DESC_HDR_SEL0_DEU |
		                     DESC_HDR_MODE0_DEU_CBC |
		                     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	/* Keyed (HMAC) hashes; .setkey is wired up in talitos_alg_alloc(). */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
2904
/* A registered algorithm instance; linked into talitos_private.alg_list. */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in priv->alg_list */
	struct device *dev;		/* owning SEC device */
	struct talitos_alg_template algt;	/* per-instance copy of template */
};
2910
2911 static int talitos_init_common(struct talitos_ctx *ctx,
2912 struct talitos_crypto_alg *talitos_alg)
2913 {
2914 struct talitos_private *priv;
2915
2916 /* update context with ptr to dev */
2917 ctx->dev = talitos_alg->dev;
2918
2919 /* assign SEC channel to tfm in round-robin fashion */
2920 priv = dev_get_drvdata(ctx->dev);
2921 ctx->ch = atomic_inc_return(&priv->last_chan) &
2922 (priv->num_channels - 1);
2923
2924 /* copy descriptor header template value */
2925 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2926
2927 /* select done notification */
2928 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2929
2930 return 0;
2931 }
2932
/*
 * Generic cra_init for ablkcipher and ahash tfms: recover the enclosing
 * talitos_crypto_alg and run the common context setup.
 *
 * The container_of() arm depends on the alg type because the template
 * keeps the alg flavours in a union: for AHASH the crypto_alg is
 * embedded inside an ahash_alg, so it must first be converted with
 * __crypto_ahash_alg().
 */
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}
2949
2950 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2951 {
2952 struct aead_alg *alg = crypto_aead_alg(tfm);
2953 struct talitos_crypto_alg *talitos_alg;
2954 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2955
2956 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2957 algt.alg.aead);
2958
2959 return talitos_init_common(ctx, talitos_alg);
2960 }
2961
2962 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2963 {
2964 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2965
2966 talitos_cra_init(tfm);
2967
2968 ctx->keylen = 0;
2969 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2970 sizeof(struct talitos_ahash_req_ctx));
2971
2972 return 0;
2973 }
2974
2975 /*
2976 * given the alg's descriptor header template, determine whether descriptor
2977 * type and primary/secondary execution units required match the hw
2978 * capabilities description provided in the device tree node.
2979 */
2980 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2981 {
2982 struct talitos_private *priv = dev_get_drvdata(dev);
2983 int ret;
2984
2985 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2986 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2987
2988 if (SECONDARY_EU(desc_hdr_template))
2989 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2990 & priv->exec_units);
2991
2992 return ret;
2993 }
2994
2995 static int talitos_remove(struct platform_device *ofdev)
2996 {
2997 struct device *dev = &ofdev->dev;
2998 struct talitos_private *priv = dev_get_drvdata(dev);
2999 struct talitos_crypto_alg *t_alg, *n;
3000 int i;
3001
3002 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3003 switch (t_alg->algt.type) {
3004 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3005 break;
3006 case CRYPTO_ALG_TYPE_AEAD:
3007 crypto_unregister_aead(&t_alg->algt.alg.aead);
3008 case CRYPTO_ALG_TYPE_AHASH:
3009 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3010 break;
3011 }
3012 list_del(&t_alg->entry);
3013 kfree(t_alg);
3014 }
3015
3016 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3017 talitos_unregister_rng(dev);
3018
3019 for (i = 0; priv->chan && i < priv->num_channels; i++)
3020 kfree(priv->chan[i].fifo);
3021
3022 kfree(priv->chan);
3023
3024 for (i = 0; i < 2; i++)
3025 if (priv->irq[i]) {
3026 free_irq(priv->irq[i], dev);
3027 irq_dispose_mapping(priv->irq[i]);
3028 }
3029
3030 tasklet_kill(&priv->done_task[0]);
3031 if (priv->irq[1])
3032 tasklet_kill(&priv->done_task[1]);
3033
3034 iounmap(priv->reg);
3035
3036 kfree(priv);
3037
3038 return 0;
3039 }
3040
/*
 * Allocate and populate a talitos_crypto_alg from one driver_algs[]
 * template, wiring up the type-specific operations and rejecting
 * algorithms the hardware revision cannot support.
 *
 * Returns the new instance, or ERR_PTR(-ENOTSUPP/-EINVAL/-ENOMEM).
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	/* per-instance copy; ops and header template may be patched below */
	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224-based AEAD needs hw-init support in the MDEU */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only the keyed (hmac) hashes get a setkey */
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * Without SHA224 hw-init support, run sha224 as sha256 with
		 * software-supplied initial state (ahash_init_sha224_swinit).
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
3125
3126 static int talitos_probe_irq(struct platform_device *ofdev)
3127 {
3128 struct device *dev = &ofdev->dev;
3129 struct device_node *np = ofdev->dev.of_node;
3130 struct talitos_private *priv = dev_get_drvdata(dev);
3131 int err;
3132 bool is_sec1 = has_ftr_sec1(priv);
3133
3134 priv->irq[0] = irq_of_parse_and_map(np, 0);
3135 if (!priv->irq[0]) {
3136 dev_err(dev, "failed to map irq\n");
3137 return -EINVAL;
3138 }
3139 if (is_sec1) {
3140 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3141 dev_driver_string(dev), dev);
3142 goto primary_out;
3143 }
3144
3145 priv->irq[1] = irq_of_parse_and_map(np, 1);
3146
3147 /* get the primary irq line */
3148 if (!priv->irq[1]) {
3149 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3150 dev_driver_string(dev), dev);
3151 goto primary_out;
3152 }
3153
3154 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3155 dev_driver_string(dev), dev);
3156 if (err)
3157 goto primary_out;
3158
3159 /* get the secondary irq line */
3160 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3161 dev_driver_string(dev), dev);
3162 if (err) {
3163 dev_err(dev, "failed to request secondary irq\n");
3164 irq_dispose_mapping(priv->irq[1]);
3165 priv->irq[1] = 0;
3166 }
3167
3168 return err;
3169
3170 primary_out:
3171 if (err) {
3172 dev_err(dev, "failed to request primary irq\n");
3173 irq_dispose_mapping(priv->irq[0]);
3174 priv->irq[0] = 0;
3175 }
3176
3177 return err;
3178 }
3179
/*
 * talitos_probe - set up one SEC crypto engine from its device-tree node.
 *
 * Maps the register block, reads the capability properties from the
 * device tree, requests interrupts, allocates per-channel request
 * FIFOs, resets/initializes the hardware, and registers the hwrng and
 * every crypto algorithm the engine's execution units support.
 *
 * Returns 0 on success or a negative errno.  On any failure the
 * single err_out path calls talitos_remove(), which unwinds whatever
 * was set up so far (it tolerates partially-initialized state).
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;	/* per-channel register stride: SEC1 vs SEC2 layout */

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	/* drvdata must be set before talitos_probe_irq()/talitos_remove() use it */
	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* all four properties are mandatory; num_channels must be a power of 2 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/*
	 * Execution-unit register offsets differ between SEC1.2, SEC1.0
	 * and SEC2+ parts; record them up front along with the channel
	 * register stride used in the loop below.
	 */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* done-tasklet topology mirrors the irq topology chosen above */
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	/* NOTE(review): open-coded n*size — kcalloc() would be overflow-safe */
	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* hardware requires a power-of-2 fifo depth */
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/* with a single irq line (or for even channels) use the base bank */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/* counter reaches 0 when the channel fifo is full */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* NOTE(review): return value ignored — a 36-bit mask could be
	 * unsupported on some platforms; confirm against dma_set_mask() docs */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: alg deliberately skipped on this h/w */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			/* one failed registration is logged but non-fatal */
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	/* talitos_remove() unwinds everything allocated so far */
	talitos_remove(ofdev);

	return err;
}
3388
/*
 * Device-tree match table.  Each SEC generation is gated behind its own
 * Kconfig option so a kernel can be built with support for only one
 * family; more specific compatibles (e.g. "fsl,sec2.1") are expected to
 * also list the base compatible matched here.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3403
3404 static struct platform_driver talitos_driver = {
3405 .driver = {
3406 .name = "talitos",
3407 .of_match_table = talitos_match,
3408 },
3409 .probe = talitos_probe,
3410 .remove = talitos_remove,
3411 };
3412
/* Generates module init/exit that register/unregister talitos_driver. */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");