/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

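/*
 * Helpers for the two hardware pointer formats: SEC1 carries a 16-bit
 * length (len1) next to a 32-bit pointer, while SEC2/3 additionally has
 * an extension byte (eptr) holding the upper bits of a 36-bit DMA address.
 */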
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
                           unsigned int len, bool is_sec1)
{
        ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
        if (is_sec1) {
                ptr->len1 = cpu_to_be16(len);
        } else {
                ptr->len = cpu_to_be16(len);
                ptr->eptr = upper_32_bits(dma_addr);
        }
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
                             struct talitos_ptr *src_ptr, bool is_sec1)
{
        dst_ptr->ptr = src_ptr->ptr;
        if (is_sec1) {
                dst_ptr->len1 = src_ptr->len1;
        } else {
                dst_ptr->len = src_ptr->len;
                dst_ptr->eptr = src_ptr->eptr;
        }
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
                                           bool is_sec1)
{
        if (is_sec1)
                return be16_to_cpu(ptr->len1);
        else
                return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
                                   bool is_sec1)
{
        if (!is_sec1)
                ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
        if (!is_sec1)
                ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
                                   struct talitos_ptr *ptr,
                                   unsigned int len, void *data,
                                   enum dma_data_direction dir)
{
        dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
                                     struct talitos_ptr *ptr,
                                     enum dma_data_direction dir)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
                         from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        bool is_sec1 = has_ftr_sec1(priv);

        if (is_sec1) {
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS1_CCCR_LO_RESET);

                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
                        TALITOS1_CCCR_LO_RESET) && --timeout)
                        cpu_relax();
        } else {
                setbits32(priv->chan[ch].reg + TALITOS_CCCR,
                          TALITOS2_CCCR_RESET);

                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
                        TALITOS2_CCCR_RESET) && --timeout)
                        cpu_relax();
        }

        if (timeout == 0) {
                dev_err(dev, "failed to reset channel %d\n", ch);
                return -EIO;
        }

        /* set 36-bit addressing, done writeback enable and done IRQ enable */
        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
                  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
        /* enable chaining descriptors */
        if (is_sec1)
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS_CCCR_LO_NE);

        /* and ICCR writeback, if available */
        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS_CCCR_LO_IWSE);

        return 0;
}

static int reset_device(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        bool is_sec1 = has_ftr_sec1(priv);
        u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

        setbits32(priv->reg + TALITOS_MCR, mcr);

        while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
               && --timeout)
                cpu_relax();

        if (priv->irq[1]) {
                mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
                setbits32(priv->reg + TALITOS_MCR, mcr);
        }

        if (timeout == 0) {
                dev_err(dev, "failed to reset device\n");
                return -EIO;
        }

        return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int ch, err;
        bool is_sec1 = has_ftr_sec1(priv);

        /*
         * Master reset
         * errata documentation: warning: certain SEC interrupts
         * are not fully cleared by writing the MCR:SWR bit,
         * set bit twice to completely reset
         */
        err = reset_device(dev);
        if (err)
                return err;

        err = reset_device(dev);
        if (err)
                return err;

        /* reset channels */
        for (ch = 0; ch < priv->num_channels; ch++) {
                err = reset_channel(dev, ch);
                if (err)
                        return err;
        }

        /* enable channel done and error interrupts */
        if (is_sec1) {
                clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
                clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
                /* disable parity error check in DEU (erroneous? test vect.) */
                setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
        } else {
                setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
                setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
        }

        /* disable integrity check error interrupts (use writeback instead) */
        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
                setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
                          TALITOS_MDEUICR_LO_ICE);

        return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:        the SEC device to be used
 * @ch:         the SEC device channel to be used
 * @desc:       the descriptor to be processed by the device
 * @callback:   whom to call when processing is complete
 * @context:    a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and the feedback field in the descriptor
 * header for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
                   void (*callback)(struct device *dev,
                                    struct talitos_desc *desc,
                                    void *context, int error),
                   void *context)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_request *request;
        unsigned long flags;
        int head;
        bool is_sec1 = has_ftr_sec1(priv);

        spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

        if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
                /* h/w fifo is full */
                spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
                return -EAGAIN;
        }

        head = priv->chan[ch].head;
        request = &priv->chan[ch].fifo[head];

        /* map descriptor and save caller data */
        if (is_sec1) {
                desc->hdr1 = desc->hdr;
                request->dma_desc = dma_map_single(dev, &desc->hdr1,
                                                   TALITOS_DESC_SIZE,
                                                   DMA_BIDIRECTIONAL);
        } else {
                request->dma_desc = dma_map_single(dev, desc,
                                                   TALITOS_DESC_SIZE,
                                                   DMA_BIDIRECTIONAL);
        }
        request->callback = callback;
        request->context = context;

        /* increment fifo head */
        priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

        smp_wmb();
        request->desc = desc;

        /* GO! */
        wmb();
        out_be32(priv->chan[ch].reg + TALITOS_FF,
                 upper_32_bits(request->dma_desc));
        out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
                 lower_32_bits(request->dma_desc));

        spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

        return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
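
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * submits a dma-mapped descriptor and treats -EINPROGRESS as success
 * and -EAGAIN as a full channel fifo worth retrying. The callback and
 * context names below are hypothetical.
 *
 *      err = talitos_submit(dev, ch, &edesc->desc, my_done_cb, my_ctx);
 *      if (err == -EAGAIN)
 *              back off, then resubmit;
 *      else if (err != -EINPROGRESS)
 *              handle the hard failure;
 */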

/*
 * process what was done; notify the callback, passing the error for any
 * descriptor that did not complete
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_request *request, saved_req;
        unsigned long flags;
        int tail, status;
        bool is_sec1 = has_ftr_sec1(priv);

        spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

        tail = priv->chan[ch].tail;
        while (priv->chan[ch].fifo[tail].desc) {
                __be32 hdr;

                request = &priv->chan[ch].fifo[tail];

                /* descriptors with their done bits set don't get the error */
                rmb();
                if (!is_sec1)
                        hdr = request->desc->hdr;
                else if (request->desc->next_desc)
                        hdr = (request->desc + 1)->hdr1;
                else
                        hdr = request->desc->hdr1;

                if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
                        status = 0;
                else
                        if (!error)
                                break;
                        else
                                status = error;

                dma_unmap_single(dev, request->dma_desc,
                                 TALITOS_DESC_SIZE,
                                 DMA_BIDIRECTIONAL);

                /* copy entries so we can call callback outside lock */
                saved_req.desc = request->desc;
                saved_req.callback = request->callback;
                saved_req.context = request->context;

                /* release request entry in fifo */
                smp_wmb();
                request->desc = NULL;

                /* increment fifo tail */
                priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

                spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

                atomic_dec(&priv->chan[ch].submit_count);

                saved_req.callback(dev, saved_req.desc, saved_req.context,
                                   status);
                /* channel may resume processing in single desc error case */
                if (error && !reset_ch && status == error)
                        return;
                spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
                tail = priv->chan[ch].tail;
        }

        spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask) \
static void talitos1_done_##name(unsigned long data) \
{ \
        struct device *dev = (struct device *)data; \
        struct talitos_private *priv = dev_get_drvdata(dev); \
        unsigned long flags; \
\
        if (ch_done_mask & 0x10000000) \
                flush_channel(dev, 0, 0, 0); \
        if (ch_done_mask & 0x40000000) \
                flush_channel(dev, 1, 0, 0); \
        if (ch_done_mask & 0x00010000) \
                flush_channel(dev, 2, 0, 0); \
        if (ch_done_mask & 0x00040000) \
                flush_channel(dev, 3, 0, 0); \
\
        /* At this point, all completed channels have been processed */ \
        /* Unmask done interrupts for channels completed later on. */ \
        spin_lock_irqsave(&priv->reg_lock, flags); \
        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
        clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
        spin_unlock_irqrestore(&priv->reg_lock, flags); \
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask) \
static void talitos2_done_##name(unsigned long data) \
{ \
        struct device *dev = (struct device *)data; \
        struct talitos_private *priv = dev_get_drvdata(dev); \
        unsigned long flags; \
\
        if (ch_done_mask & 1) \
                flush_channel(dev, 0, 0, 0); \
        if (ch_done_mask & (1 << 2)) \
                flush_channel(dev, 1, 0, 0); \
        if (ch_done_mask & (1 << 4)) \
                flush_channel(dev, 2, 0, 0); \
        if (ch_done_mask & (1 << 6)) \
                flush_channel(dev, 3, 0, 0); \
\
        /* At this point, all completed channels have been processed */ \
        /* Unmask done interrupts for channels completed later on. */ \
        spin_lock_irqsave(&priv->reg_lock, flags); \
        setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
        setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
        spin_unlock_irqrestore(&priv->reg_lock, flags); \
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int tail, iter;
        dma_addr_t cur_desc;

        cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
        cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

        if (!cur_desc) {
                dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
                return 0;
        }

        tail = priv->chan[ch].tail;

        iter = tail;
        while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
               priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
                iter = (iter + 1) & (priv->fifo_len - 1);
                if (iter == tail) {
                        dev_err(dev, "couldn't locate current descriptor\n");
                        return 0;
                }
        }

        if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
                return (priv->chan[ch].fifo[iter].desc + 1)->hdr;

        return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int i;

        if (!desc_hdr)
                desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

        switch (desc_hdr & DESC_HDR_SEL0_MASK) {
        case DESC_HDR_SEL0_AFEU:
                dev_err(dev, "AFEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_afeu + TALITOS_EUISR),
                        in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_DEU:
                dev_err(dev, "DEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_deu + TALITOS_EUISR),
                        in_be32(priv->reg_deu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_MDEUA:
        case DESC_HDR_SEL0_MDEUB:
                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_RNG:
                dev_err(dev, "RNGUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_rngu + TALITOS_ISR),
                        in_be32(priv->reg_rngu + TALITOS_ISR_LO));
                break;
        case DESC_HDR_SEL0_PKEU:
                dev_err(dev, "PKEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_AESU:
                dev_err(dev, "AESUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_aesu + TALITOS_EUISR),
                        in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_CRCU:
                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_crcu + TALITOS_EUISR),
                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_KEU:
                dev_err(dev, "KEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
                break;
        }

        switch (desc_hdr & DESC_HDR_SEL1_MASK) {
        case DESC_HDR_SEL1_MDEUA:
        case DESC_HDR_SEL1_MDEUB:
                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL1_CRCU:
                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_crcu + TALITOS_EUISR),
                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
                break;
        }

        for (i = 0; i < 8; i++)
                dev_err(dev, "DESCBUF 0x%08x_%08x\n",
                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        int ch, error, reset_dev = 0;
        u32 v_lo;
        bool is_sec1 = has_ftr_sec1(priv);
        int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

        for (ch = 0; ch < priv->num_channels; ch++) {
                /* skip channels without errors */
                if (is_sec1) {
                        /* bits 29, 31, 17, 19 */
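                        /*
                         * i.e. 29 + (ch & 1) * 2 - (ch & 2) * 6 maps
                         * ch 0/1/2/3 to bits 29/31/17/19 respectively
                         */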
                        if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
                                continue;
                } else {
                        if (!(isr & (1 << (ch * 2 + 1))))
                                continue;
                }

                error = -EINVAL;

                v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

                if (v_lo & TALITOS_CCPSR_LO_DOF) {
                        dev_err(dev, "double fetch fifo overflow error\n");
                        error = -EAGAIN;
                        reset_ch = 1;
                }
                if (v_lo & TALITOS_CCPSR_LO_SOF) {
                        /* h/w dropped descriptor */
                        dev_err(dev, "single fetch fifo overflow error\n");
                        error = -EAGAIN;
                }
                if (v_lo & TALITOS_CCPSR_LO_MDTE)
                        dev_err(dev, "master data transfer error\n");
                if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
                        dev_err(dev, is_sec1 ? "pointer not complete error\n"
                                             : "s/g data length zero error\n");
                if (v_lo & TALITOS_CCPSR_LO_FPZ)
                        dev_err(dev, is_sec1 ? "parity error\n"
                                             : "fetch pointer zero error\n");
                if (v_lo & TALITOS_CCPSR_LO_IDH)
                        dev_err(dev, "illegal descriptor header error\n");
                if (v_lo & TALITOS_CCPSR_LO_IEU)
                        dev_err(dev, is_sec1 ? "static assignment error\n"
                                             : "invalid exec unit error\n");
                if (v_lo & TALITOS_CCPSR_LO_EU)
                        report_eu_error(dev, ch, current_desc_hdr(dev, ch));
                if (!is_sec1) {
                        if (v_lo & TALITOS_CCPSR_LO_GB)
                                dev_err(dev, "gather boundary error\n");
                        if (v_lo & TALITOS_CCPSR_LO_GRL)
                                dev_err(dev, "gather return/length error\n");
                        if (v_lo & TALITOS_CCPSR_LO_SB)
                                dev_err(dev, "scatter boundary error\n");
                        if (v_lo & TALITOS_CCPSR_LO_SRL)
                                dev_err(dev, "scatter return/length error\n");
                }

                flush_channel(dev, ch, error, reset_ch);

                if (reset_ch) {
                        reset_channel(dev, ch);
                } else {
                        setbits32(priv->chan[ch].reg + TALITOS_CCCR,
                                  TALITOS2_CCCR_CONT);
                        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
                        while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
                                TALITOS2_CCCR_CONT) && --timeout)
                                cpu_relax();
                        if (timeout == 0) {
                                dev_err(dev, "failed to restart channel %d\n",
                                        ch);
                                reset_dev = 1;
                        }
                }
        }
        if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
            (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
                if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
                        dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
                                isr, isr_lo);
                else
                        dev_err(dev, "done overflow, internal time out, or "
                                "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

                /* purge request queues */
                for (ch = 0; ch < priv->num_channels; ch++)
                        flush_channel(dev, ch, -EIO, 1);

                /* reset and reinitialize the device */
                init_device(dev);
        }
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
{ \
        struct device *dev = data; \
        struct talitos_private *priv = dev_get_drvdata(dev); \
        u32 isr, isr_lo; \
        unsigned long flags; \
\
        spin_lock_irqsave(&priv->reg_lock, flags); \
        isr = in_be32(priv->reg + TALITOS_ISR); \
        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
        /* Acknowledge interrupt */ \
        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
\
        if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
                spin_unlock_irqrestore(&priv->reg_lock, flags); \
                talitos_error(dev, isr & ch_err_mask, isr_lo); \
        } \
        else { \
                if (likely(isr & ch_done_mask)) { \
                        /* mask further done interrupts. */ \
                        setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
                        /* done_task will unmask done interrupts at exit */ \
                        tasklet_schedule(&priv->done_task[tlet]); \
                } \
                spin_unlock_irqrestore(&priv->reg_lock, flags); \
        } \
\
        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
                                                                IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
{ \
        struct device *dev = data; \
        struct talitos_private *priv = dev_get_drvdata(dev); \
        u32 isr, isr_lo; \
        unsigned long flags; \
\
        spin_lock_irqsave(&priv->reg_lock, flags); \
        isr = in_be32(priv->reg + TALITOS_ISR); \
        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
        /* Acknowledge interrupt */ \
        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
\
        if (unlikely(isr & ch_err_mask || isr_lo)) { \
                spin_unlock_irqrestore(&priv->reg_lock, flags); \
                talitos_error(dev, isr & ch_err_mask, isr_lo); \
        } \
        else { \
                if (likely(isr & ch_done_mask)) { \
                        /* mask further done interrupts. */ \
                        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
                        /* done_task will unmask done interrupts at exit */ \
                        tasklet_schedule(&priv->done_task[tlet]); \
                } \
                spin_unlock_irqrestore(&priv->reg_lock, flags); \
        } \
\
        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
                                                                IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
                       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
                       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        u32 ofl;
        int i;

        for (i = 0; i < 20; i++) {
                ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
                      TALITOS_RNGUSR_LO_OFL;
                if (ofl || !wait)
                        break;
                udelay(10);
        }

        return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);

        /* rng fifo requires 64-bit accesses */
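        /*
         * the value of the first read is deliberately overwritten: the
         * two back-to-back 32-bit reads form the required 64-bit access,
         * and only the low word is handed back per call
         */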
        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

        return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;

        setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
        while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
                 & TALITOS_RNGUSR_LO_RD)
               && --timeout)
                cpu_relax();
        if (timeout == 0) {
                dev_err(dev, "failed to reset rng hw\n");
                return -ENODEV;
        }

        /* start generating */
        setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

        return 0;
}

static int talitos_register_rng(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int err;

        priv->rng.name = dev_driver_string(dev);
        priv->rng.init = talitos_rng_init;
        priv->rng.data_present = talitos_rng_data_present;
        priv->rng.data_read = talitos_rng_data_read;
        priv->rng.priv = (unsigned long)dev;

        err = hwrng_register(&priv->rng);
        if (!err)
                priv->rng_registered = true;

        return err;
}

static void talitos_unregister_rng(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);

        if (!priv->rng_registered)
                return;

        hwrng_unregister(&priv->rng);
        priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY            3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
        struct device *dev;
        int ch;
        __be32 desc_hdr_template;
        u8 key[TALITOS_MAX_KEY_SIZE];
        u8 iv[TALITOS_MAX_IV_LENGTH];
        dma_addr_t dma_key;
        unsigned int keylen;
        unsigned int enckeylen;
        unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
        u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
        unsigned int hw_context_size;
        u8 buf[2][HASH_MAX_BLOCK_SIZE];
        int buf_idx;
        unsigned int swinit;
        unsigned int first;
        unsigned int last;
        unsigned int to_hash_later;
        unsigned int nbuf;
        struct scatterlist bufsl[2];
        struct scatterlist *psrc;
};

struct talitos_export_state {
        u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
        u8 buf[HASH_MAX_BLOCK_SIZE];
        unsigned int swinit;
        unsigned int first;
        unsigned int last;
        unsigned int to_hash_later;
        unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
                       const u8 *key, unsigned int keylen)
{
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = ctx->dev;
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
                goto badkey;

        if (ctx->keylen)
                dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

        memcpy(ctx->key, keys.authkey, keys.authkeylen);
        memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

        ctx->keylen = keys.authkeylen + keys.enckeylen;
        ctx->enckeylen = keys.enckeylen;
        ctx->authkeylen = keys.authkeylen;
        ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
                                      DMA_TO_DEVICE);

        return 0;

badkey:
        crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
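
/*
 * The resulting key layout in ctx->key, as built above, is the
 * concatenation [ authkey | enckey ], mapped as one DMA region of
 * ctx->keylen = authkeylen + enckeylen bytes; ipsec_esp() later points
 * the hmac key pointer and the cipher key pointer at the respective
 * offsets of that same mapping.
 */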

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
        int src_nents;
        int dst_nents;
        bool icv_ool;
        dma_addr_t iv_dma;
        int dma_len;
        dma_addr_t dma_link_tbl;
        struct talitos_desc desc;
        union {
                struct talitos_ptr link_tbl[0];
                u8 buf[0];
        };
};
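
/*
 * the union of zero-length arrays above overlays the trailing storage
 * sized via dma_len at allocation time: a link table of talitos_ptr
 * entries on SEC2+, or a flat bounce buffer on SEC1
 */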

static void talitos_sg_unmap(struct device *dev,
                             struct talitos_edesc *edesc,
                             struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int len, unsigned int offset)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        unsigned int src_nents = edesc->src_nents ? : 1;
        unsigned int dst_nents = edesc->dst_nents ? : 1;

        if (is_sec1 && dst && dst_nents > 1) {
                dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
                                           len, DMA_FROM_DEVICE);
                sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
                                     offset);
        }
        if (src != dst) {
                if (src_nents == 1 || !is_sec1)
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

                if (dst && (dst_nents == 1 || !is_sec1))
                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else if (src_nents == 1 || !is_sec1) {
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }
}

static void ipsec_esp_unmap(struct device *dev,
                            struct talitos_edesc *edesc,
                            struct aead_request *areq)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

        if (is_ipsec_esp)
                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
                                         DMA_FROM_DEVICE);
        unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

        talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
                         areq->assoclen);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);

        if (!is_ipsec_esp) {
                unsigned int dst_nents = edesc->dst_nents ? : 1;

                sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
                                   areq->assoclen + areq->cryptlen - ivsize);
        }
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
                                   struct talitos_desc *desc, void *context,
                                   int err)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        struct aead_request *areq = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        unsigned int authsize = crypto_aead_authsize(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, areq);

        /* copy the generated ICV to dst */
        if (edesc->icv_ool) {
                if (is_sec1)
                        icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
                else
                        icvdata = &edesc->link_tbl[edesc->src_nents +
                                                   edesc->dst_nents + 2];
                sg = sg_last(areq->dst, edesc->dst_nents);
                memcpy((char *)sg_virt(sg) + sg->length - authsize,
                       icvdata, authsize);
        }

        dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

        kfree(edesc);

        aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
                                          struct talitos_desc *desc,
                                          void *context, int err)
{
        struct aead_request *req = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
        char *oicv, *icv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, req);

        if (!err) {
                /* auth check */
                sg = sg_last(req->dst, edesc->dst_nents ? : 1);
                icv = (char *)sg_virt(sg) + sg->length - authsize;

                if (edesc->dma_len) {
                        if (is_sec1)
                                oicv = (char *)&edesc->dma_link_tbl +
                                       req->assoclen + req->cryptlen;
                        else
                                oicv = (char *)
                                       &edesc->link_tbl[edesc->src_nents +
                                                        edesc->dst_nents + 2];
                        if (edesc->icv_ool)
                                icv = oicv + authsize;
                } else
                        oicv = (char *)&edesc->link_tbl[0];

                err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
        }

        kfree(edesc);

        aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
                                          struct talitos_desc *desc,
                                          void *context, int err)
{
        struct aead_request *req = context;
        struct talitos_edesc *edesc;

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, req);

        /* check ICV auth status */
        if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
                     DESC_HDR_LO_ICCR1_PASS))
                err = -EBADMSG;

        kfree(edesc);

        aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
                                 unsigned int offset, int cryptlen,
                                 struct talitos_ptr *link_tbl_ptr)
{
        int n_sg = sg_count;
        int count = 0;

        while (cryptlen && sg && n_sg--) {
                unsigned int len = sg_dma_len(sg);

                if (offset >= len) {
                        offset -= len;
                        goto next;
                }

                len -= offset;

                if (len > cryptlen)
                        len = cryptlen;

                to_talitos_ptr(link_tbl_ptr + count,
                               sg_dma_address(sg) + offset, len, 0);
                to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
                count++;
                cryptlen -= len;
                offset = 0;

next:
                sg = sg_next(sg);
        }

        /* tag end of link table */
        if (count > 0)
                to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
                                       DESC_PTR_LNKTBL_RETURN, 0);

        return count;
}

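/*
 * Map src into ptr: a single segment is referenced directly; on SEC1,
 * multi-segment data goes through the bounce buffer instead; otherwise
 * a link table is built at tbl_off and ptr is tagged with
 * DESC_PTR_LNKTBL_JUMP to point at it.
 */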
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
                              unsigned int len, struct talitos_edesc *edesc,
                              struct talitos_ptr *ptr, int sg_count,
                              unsigned int offset, int tbl_off, int elen)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        if (!src) {
                to_talitos_ptr(ptr, 0, 0, is_sec1);
                return 1;
        }
        to_talitos_ptr_ext_set(ptr, elen, is_sec1);
        if (sg_count == 1) {
                to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
                return sg_count;
        }
        if (is_sec1) {
                to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
                return sg_count;
        }
        sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
                                         &edesc->link_tbl[tbl_off]);
        if (sg_count == 1) {
                /* Only one segment now, so no link tbl needed */
                copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
                return sg_count;
        }
        to_talitos_ptr(ptr, edesc->dma_link_tbl +
                            tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
        to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

        return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
                          unsigned int len, struct talitos_edesc *edesc,
                          struct talitos_ptr *ptr, int sg_count,
                          unsigned int offset, int tbl_off)
{
        return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
                                  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                     void (*callback)(struct device *dev,
                                      struct talitos_desc *desc,
                                      void *context, int error))
{
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->cryptlen;
        unsigned int ivsize = crypto_aead_ivsize(aead);
        int tbl_off = 0;
        int sg_count, ret;
        int elen = 0;
        bool sync_needed = false;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
        struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

        /* hmac key */
        to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
                sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
                                  areq->assoclen + cryptlen);
        else
                sg_count = dma_map_sg(dev, areq->src, sg_count,
                                      (areq->src == areq->dst) ?
                                      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

        /* hmac data */
        ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
                             &desc->ptr[1], sg_count, 0, tbl_off);

        if (ret > 1) {
                tbl_off += ret;
                sync_needed = true;
        }

        /* cipher iv */
        to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

        /* cipher key */
        to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
                       ctx->enckeylen, is_sec1);

        /*
         * cipher in
         * map and adjust cipher len to aead request cryptlen.
         * extent is bytes of HMAC appended to the ciphertext,
         * typically 12 for ipsec
         */
        if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
                elen = authsize;

        ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
                                 sg_count, areq->assoclen, tbl_off, elen);

        if (ret > 1) {
                tbl_off += ret;
                sync_needed = true;
        }

        /* cipher out */
        if (areq->src != areq->dst) {
                sg_count = edesc->dst_nents ? : 1;
                if (!is_sec1 || sg_count == 1)
                        dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
        }

        ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
                             sg_count, areq->assoclen, tbl_off);

        if (is_ipsec_esp)
                to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

        /* ICV data */
        if (ret > 1) {
                tbl_off += ret;
                edesc->icv_ool = true;
                sync_needed = true;

                if (is_ipsec_esp) {
                        struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
                        int offset = (edesc->src_nents + edesc->dst_nents + 2) *
                                     sizeof(struct talitos_ptr) + authsize;

                        /* Add an entry to the link table for ICV data */
                        to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
                        to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
                                               is_sec1);

                        /* icv data follows link tables */
                        to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
                                       authsize, is_sec1);
                } else {
                        dma_addr_t addr = edesc->dma_link_tbl;

                        if (is_sec1)
                                addr += areq->assoclen + cryptlen;
                        else
                                addr += sizeof(struct talitos_ptr) * tbl_off;

                        to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
                }
        } else if (!is_ipsec_esp) {
                ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
                                     &desc->ptr[6], sg_count,
                                     areq->assoclen + cryptlen, tbl_off);
                if (ret > 1) {
                        tbl_off += ret;
                        edesc->icv_ool = true;
                        sync_needed = true;
                } else {
                        edesc->icv_ool = false;
                }
        } else {
                edesc->icv_ool = false;
        }

        /* iv out */
        if (is_ipsec_esp)
                map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
                                       DMA_FROM_DEVICE);

        if (sync_needed)
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len,
                                           DMA_BIDIRECTIONAL);

        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                ipsec_esp_unmap(dev, edesc, areq);
                kfree(edesc);
        }
        return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                                                 struct scatterlist *src,
                                                 struct scatterlist *dst,
                                                 u8 *iv,
                                                 unsigned int assoclen,
                                                 unsigned int cryptlen,
                                                 unsigned int authsize,
                                                 unsigned int ivsize,
                                                 int icv_stashing,
                                                 u32 cryptoflags,
                                                 bool encrypt)
{
        struct talitos_edesc *edesc;
        int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
        dma_addr_t iv_dma = 0;
        gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                      GFP_ATOMIC;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
        void *err;

        if (cryptlen + authsize > max_len) {
                dev_err(dev, "length exceeds h/w max limit\n");
                return ERR_PTR(-EINVAL);
        }

        if (ivsize)
                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

        if (!dst || dst == src) {
                src_len = assoclen + cryptlen + authsize;
                src_nents = sg_nents_for_len(src, src_len);
                if (src_nents < 0) {
                        dev_err(dev, "Invalid number of src SG.\n");
                        err = ERR_PTR(-EINVAL);
                        goto error_sg;
                }
                src_nents = (src_nents == 1) ? 0 : src_nents;
                dst_nents = dst ? src_nents : 0;
                dst_len = 0;
        } else { /* dst && dst != src */
                src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
                src_nents = sg_nents_for_len(src, src_len);
                if (src_nents < 0) {
                        dev_err(dev, "Invalid number of src SG.\n");
                        err = ERR_PTR(-EINVAL);
                        goto error_sg;
                }
                src_nents = (src_nents == 1) ? 0 : src_nents;
                dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
                dst_nents = sg_nents_for_len(dst, dst_len);
                if (dst_nents < 0) {
                        dev_err(dev, "Invalid number of dst SG.\n");
                        err = ERR_PTR(-EINVAL);
                        goto error_sg;
                }
                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
        }

        /*
         * allocate space for base edesc plus the link tables,
         * allowing for two separate entries for AD and generated ICV (+ 2),
         * and space for two sets of ICVs (stashed and generated)
         */
        alloc_len = sizeof(struct talitos_edesc);
        if (src_nents || dst_nents) {
                if (is_sec1)
                        dma_len = (src_nents ? src_len : 0) +
                                  (dst_nents ? dst_len : 0);
                else
                        dma_len = (src_nents + dst_nents + 2) *
                                  sizeof(struct talitos_ptr) + authsize * 2;
                alloc_len += dma_len;
        } else {
                dma_len = 0;
                alloc_len += icv_stashing ? authsize : 0;
        }

        /* if it's an ahash, add space for a second desc next to the first one */
        if (is_sec1 && !dst)
                alloc_len += sizeof(struct talitos_desc);

        edesc = kmalloc(alloc_len, GFP_DMA | flags);
        if (!edesc) {
                dev_err(dev, "could not allocate edescriptor\n");
                err = ERR_PTR(-ENOMEM);
                goto error_sg;
        }
        memset(&edesc->desc, 0, sizeof(edesc->desc));

        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->dma_len = dma_len;
        if (dma_len) {
                void *addr = &edesc->link_tbl[0];

                if (is_sec1 && !dst)
                        addr += sizeof(struct talitos_desc);
                edesc->dma_link_tbl = dma_map_single(dev, addr,
                                                     edesc->dma_len,
                                                     DMA_BIDIRECTIONAL);
        }
        return edesc;
error_sg:
        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
        return err;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
                                              int icv_stashing, bool encrypt)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);

        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
                                   iv, areq->assoclen, areq->cryptlen,
                                   authsize, ivsize, icv_stashing,
                                   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 0, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;

        req->cryptlen -= authsize;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 1, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
            ((!edesc->src_nents && !edesc->dst_nents) ||
             priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

                /* decrypt and check the ICV */
                edesc->desc.hdr = ctx->desc_hdr_template |
                                  DESC_HDR_DIR_INBOUND |
                                  DESC_HDR_MODE1_MDEU_CICV;

                /* reset integrity check result bits */

                return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
        }

        /* Have to check the ICV with software */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

        /* stash incoming ICV for later cmp with ICV generated by the h/w */
        if (edesc->dma_len)
                icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
                                                   edesc->dst_nents + 2];
        else
                icvdata = &edesc->link_tbl[0];

        sg = sg_last(req->src, edesc->src_nents ? : 1);

        memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

        return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct device *dev = ctx->dev;
        u32 tmp[DES_EXPKEY_WORDS];

        if (keylen > TALITOS_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if (unlikely(crypto_ablkcipher_get_flags(cipher) &
                     CRYPTO_TFM_REQ_WEAK_KEY) &&
            !des_ekey(tmp, key)) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
                return -EINVAL;
        }

        if (ctx->keylen)
                dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

        memcpy(&ctx->key, key, keylen);
        ctx->keylen = keylen;

        ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

        return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
{
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

        talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
                            struct talitos_desc *desc, void *context,
                            int err)
{
        struct ablkcipher_request *areq = context;
        struct talitos_edesc *edesc;

        edesc = container_of(desc, struct talitos_edesc, desc);

        common_nonsnoop_unmap(dev, edesc, areq);

        kfree(edesc);

        areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
                           struct ablkcipher_request *areq,
                           void (*callback) (struct device *dev,
                                             struct talitos_desc *desc,
                                             void *context, int error))
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->nbytes;
        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        int sg_count, ret;
        bool sync_needed = false;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        /* first DWORD empty */

        /* cipher iv */
        to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

        /* cipher key */
        to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
                sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
                                  cryptlen);
        else
                sg_count = dma_map_sg(dev, areq->src, sg_count,
                                      (areq->src == areq->dst) ?
                                      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        /*
         * cipher in
         */
        sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
                                  &desc->ptr[3], sg_count, 0, 0);
        if (sg_count > 1)
                sync_needed = true;

        /* cipher out */
        if (areq->src != areq->dst) {
                sg_count = edesc->dst_nents ? : 1;
                if (!is_sec1 || sg_count == 1)
                        dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
        }

        ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
                             sg_count, 0, (edesc->src_nents + 1));
        if (ret > 1)
                sync_needed = true;

        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
                               DMA_FROM_DEVICE);

        /* last DWORD empty */

        if (sync_needed)
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);

        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                common_nonsnoop_unmap(dev, edesc, areq);
                kfree(edesc);
        }
        return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
                                                    areq, bool encrypt)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
                                   areq->info, 0, areq->nbytes, 0, ivsize, 0,
                                   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

        return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
                                       struct talitos_edesc *edesc,
                                       struct ahash_request *areq)
{
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        struct talitos_desc *desc = &edesc->desc;
        struct talitos_desc *desc2 = desc + 1;

        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
        if (desc->next_desc &&
            desc->ptr[5].ptr != desc2->ptr[5].ptr)
                unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

        talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

        /* When using hashctx-in, must unmap it. */
        if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
                                         DMA_TO_DEVICE);
        else if (desc->next_desc)
                unmap_single_talitos_ptr(dev, &desc2->ptr[1],
                                         DMA_TO_DEVICE);

        if (is_sec1 && req_ctx->nbuf)
                unmap_single_talitos_ptr(dev, &desc->ptr[3],
                                         DMA_TO_DEVICE);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);

        if (edesc->desc.next_desc)
                dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
                                 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
                       struct talitos_desc *desc, void *context,
                       int err)
{
        struct ahash_request *areq = context;
        struct talitos_edesc *edesc =
                container_of(desc, struct talitos_edesc, desc);
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

        if (!req_ctx->last && req_ctx->to_hash_later) {
                /* Position any partial block for next update/final/finup */
                req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
                req_ctx->nbuf = req_ctx->to_hash_later;
        }
        common_nonsnoop_hash_unmap(dev, edesc, areq);

        kfree(edesc);

        areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
                                      struct talitos_edesc *edesc,
                                      struct talitos_ptr *ptr)
{
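        /*
         * one standard MD5/SHA-style padding block for an empty message:
         * a 0x80 terminator followed by zeros, including a zero length
         * field in the final eight bytes
         */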
        static u8 padded_hash[64] = {
                0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        };

        pr_err_once("Bug in SEC1, padding ourselves\n");
        edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
        map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
                               (char *)padded_hash, DMA_TO_DEVICE);
}
1769
1770 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1771 struct ahash_request *areq, unsigned int length,
1772 unsigned int offset,
1773 void (*callback) (struct device *dev,
1774 struct talitos_desc *desc,
1775 void *context, int error))
1776 {
1777 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1778 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1779 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1780 struct device *dev = ctx->dev;
1781 struct talitos_desc *desc = &edesc->desc;
1782 int ret;
1783 bool sync_needed = false;
1784 struct talitos_private *priv = dev_get_drvdata(dev);
1785 bool is_sec1 = has_ftr_sec1(priv);
1786 int sg_count;
1787
1788 /* first DWORD empty */
1789
1790 /* hash context in */
1791 if (!req_ctx->first || req_ctx->swinit) {
1792 map_single_talitos_ptr(dev, &desc->ptr[1],
1793 req_ctx->hw_context_size,
1794 (char *)req_ctx->hw_context,
1795 DMA_TO_DEVICE);
1796 req_ctx->swinit = 0;
1797 }
1798 /* Indicate next op is not the first. */
1799 req_ctx->first = 0;
1800
1801 /* HMAC key */
1802 if (ctx->keylen)
1803 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1804 is_sec1);
1805
1806 if (is_sec1 && req_ctx->nbuf)
1807 length -= req_ctx->nbuf;
1808
1809 sg_count = edesc->src_nents ?: 1;
1810 if (is_sec1 && sg_count > 1)
1811 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1812 edesc->buf + sizeof(struct talitos_desc),
1813 length, req_ctx->nbuf);
1814 else if (length)
1815 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1816 DMA_TO_DEVICE);
1817 /*
1818 * data in
1819 */
1820 if (is_sec1 && req_ctx->nbuf) {
1821 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1822 req_ctx->buf[req_ctx->buf_idx],
1823 DMA_TO_DEVICE);
1824 } else {
1825 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1826 &desc->ptr[3], sg_count, offset, 0);
1827 if (sg_count > 1)
1828 sync_needed = true;
1829 }
1830
1831 /* fifth DWORD empty */
1832
1833 /* hash/HMAC out -or- hash context out */
1834 if (req_ctx->last)
1835 map_single_talitos_ptr(dev, &desc->ptr[5],
1836 crypto_ahash_digestsize(tfm),
1837 areq->result, DMA_FROM_DEVICE);
1838 else
1839 map_single_talitos_ptr(dev, &desc->ptr[5],
1840 req_ctx->hw_context_size,
1841 req_ctx->hw_context, DMA_FROM_DEVICE);
1842
1843 /* last DWORD empty */
1844
1845 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1846 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1847
1848 if (is_sec1 && req_ctx->nbuf && length) {
1849 struct talitos_desc *desc2 = desc + 1;
1850 dma_addr_t next_desc;
1851
1852 memset(desc2, 0, sizeof(*desc2));
1853 desc2->hdr = desc->hdr;
1854 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1855 desc2->hdr1 = desc2->hdr;
1856 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1857 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1858 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1859
1860 if (desc->ptr[1].ptr)
1861 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1862 is_sec1);
1863 else
1864 map_single_talitos_ptr(dev, &desc2->ptr[1],
1865 req_ctx->hw_context_size,
1866 req_ctx->hw_context,
1867 DMA_TO_DEVICE);
1868 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1869 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1870 &desc2->ptr[3], sg_count, offset, 0);
1871 if (sg_count > 1)
1872 sync_needed = true;
1873 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1874 if (req_ctx->last)
1875 map_single_talitos_ptr(dev, &desc->ptr[5],
1876 req_ctx->hw_context_size,
1877 req_ctx->hw_context,
1878 DMA_FROM_DEVICE);
1879
1880 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1881 DMA_BIDIRECTIONAL);
1882 desc->next_desc = cpu_to_be32(next_desc);
1883 }
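/*
* On SEC1, when both buffered and fresh data are present, the job is
* split in two: this descriptor re-runs over the buffered partial
* block as a continuation (CONT set, PAD and DONE notification
* cleared), while desc2, reached through next_desc, covers the
* remaining scatterlist data. desc2 is mapped starting at hdr1,
* which appears to be the header word the SEC1 engine fetches.
*/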
1884
1885 if (sync_needed)
1886 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1887 edesc->dma_len, DMA_BIDIRECTIONAL);
1888
1889 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1890 if (ret != -EINPROGRESS) {
1891 common_nonsnoop_hash_unmap(dev, edesc, areq);
1892 kfree(edesc);
1893 }
1894 return ret;
1895 }
1896
1897 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1898 unsigned int nbytes)
1899 {
1900 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1901 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1902 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1903 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1904 bool is_sec1 = has_ftr_sec1(priv);
1905
1906 if (is_sec1)
1907 nbytes -= req_ctx->nbuf;
1908
1909 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1910 nbytes, 0, 0, 0, areq->base.flags, false);
1911 }
1912
1913 static int ahash_init(struct ahash_request *areq)
1914 {
1915 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1917 unsigned int size;
1918
1919 /* Initialize the context */
1920 req_ctx->buf_idx = 0;
1921 req_ctx->nbuf = 0;
1922 req_ctx->first = 1; /* first indicates h/w must init its context */
1923 req_ctx->swinit = 0; /* assume h/w init of context */
1924 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1925 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1926 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1927 req_ctx->hw_context_size = size;
1928
1929 return 0;
1930 }
1931
1932 /*
1933 * on h/w without explicit sha224 support, we initialize h/w context
1934 * manually with sha224 constants, and tell it to run sha256.
1935 */
1936 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1937 {
1938 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1939
1940 ahash_init(areq);
req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1942
1943 req_ctx->hw_context[0] = SHA224_H0;
1944 req_ctx->hw_context[1] = SHA224_H1;
1945 req_ctx->hw_context[2] = SHA224_H2;
1946 req_ctx->hw_context[3] = SHA224_H3;
1947 req_ctx->hw_context[4] = SHA224_H4;
1948 req_ctx->hw_context[5] = SHA224_H5;
1949 req_ctx->hw_context[6] = SHA224_H6;
1950 req_ctx->hw_context[7] = SHA224_H7;
1951
1952 /* init 64-bit count */
1953 req_ctx->hw_context[8] = 0;
1954 req_ctx->hw_context[9] = 0;
1955
1956 return 0;
1957 }
1958
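/*
* Buffering strategy, roughly: anything smaller than one block is only
* buffered; otherwise the block-aligned part is hashed now and the
* remainder carried over in buf[]. When not on the last request and
* the data happens to be block-aligned, one full block is still held
* back, presumably so a later final()/finup() never has to submit a
* zero-length operation.
*/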
1959 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1960 {
1961 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1962 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1963 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1964 struct talitos_edesc *edesc;
1965 unsigned int blocksize =
1966 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1967 unsigned int nbytes_to_hash;
1968 unsigned int to_hash_later;
1969 unsigned int nsg;
1970 int nents;
1971 struct device *dev = ctx->dev;
1972 struct talitos_private *priv = dev_get_drvdata(dev);
1973 bool is_sec1 = has_ftr_sec1(priv);
1974 int offset = 0;
1975 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1976
1977 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1978 /* Buffer up to one whole block */
1979 nents = sg_nents_for_len(areq->src, nbytes);
1980 if (nents < 0) {
1981 dev_err(ctx->dev, "Invalid number of src SG.\n");
1982 return nents;
1983 }
1984 sg_copy_to_buffer(areq->src, nents,
1985 ctx_buf + req_ctx->nbuf, nbytes);
1986 req_ctx->nbuf += nbytes;
1987 return 0;
1988 }
1989
1990 /* At least (blocksize + 1) bytes are available to hash */
1991 nbytes_to_hash = nbytes + req_ctx->nbuf;
1992 to_hash_later = nbytes_to_hash & (blocksize - 1);
1993
1994 if (req_ctx->last)
1995 to_hash_later = 0;
1996 else if (to_hash_later)
1997 /* There is a partial block. Hash the full block(s) now */
1998 nbytes_to_hash -= to_hash_later;
1999 else {
2000 /* Keep one block buffered */
2001 nbytes_to_hash -= blocksize;
2002 to_hash_later = blocksize;
2003 }
2004
2005 /* Chain in any previously buffered data */
2006 if (!is_sec1 && req_ctx->nbuf) {
2007 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2008 sg_init_table(req_ctx->bufsl, nsg);
2009 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2010 if (nsg > 1)
2011 sg_chain(req_ctx->bufsl, 2, areq->src);
2012 req_ctx->psrc = req_ctx->bufsl;
2013 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2014 if (nbytes_to_hash > blocksize)
2015 offset = blocksize - req_ctx->nbuf;
2016 else
2017 offset = nbytes_to_hash - req_ctx->nbuf;
2018 nents = sg_nents_for_len(areq->src, offset);
2019 if (nents < 0) {
2020 dev_err(ctx->dev, "Invalid number of src SG.\n");
2021 return nents;
2022 }
2023 sg_copy_to_buffer(areq->src, nents,
2024 ctx_buf + req_ctx->nbuf, offset);
2025 req_ctx->nbuf += offset;
2026 req_ctx->psrc = areq->src;
2027 } else
2028 req_ctx->psrc = areq->src;
2029
2030 if (to_hash_later) {
2031 nents = sg_nents_for_len(areq->src, nbytes);
2032 if (nents < 0) {
2033 dev_err(ctx->dev, "Invalid number of src SG.\n");
2034 return nents;
2035 }
2036 sg_pcopy_to_buffer(areq->src, nents,
2037 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2038 to_hash_later,
2039 nbytes - to_hash_later);
2040 }
2041 req_ctx->to_hash_later = to_hash_later;
2042
2043 /* Allocate extended descriptor */
2044 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2045 if (IS_ERR(edesc))
2046 return PTR_ERR(edesc);
2047
2048 edesc->desc.hdr = ctx->desc_hdr_template;
2049
2050 /* On last one, request SEC to pad; otherwise continue */
2051 if (req_ctx->last)
2052 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2053 else
2054 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2055
2056 /* request SEC to INIT hash. */
2057 if (req_ctx->first && !req_ctx->swinit)
2058 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2059
2060 /* When the tfm context has a keylen, it's an HMAC.
* A first or last (i.e. not middle) descriptor must request HMAC.
2062 */
2063 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2064 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2065
2066 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2067 ahash_done);
2068 }
2069
2070 static int ahash_update(struct ahash_request *areq)
2071 {
2072 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2073
2074 req_ctx->last = 0;
2075
2076 return ahash_process_req(areq, areq->nbytes);
2077 }
2078
2079 static int ahash_final(struct ahash_request *areq)
2080 {
2081 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2082
2083 req_ctx->last = 1;
2084
2085 return ahash_process_req(areq, 0);
2086 }
2087
2088 static int ahash_finup(struct ahash_request *areq)
2089 {
2090 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2091
2092 req_ctx->last = 1;
2093
2094 return ahash_process_req(areq, areq->nbytes);
2095 }
2096
2097 static int ahash_digest(struct ahash_request *areq)
2098 {
2099 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2101
2102 ahash->init(areq);
2103 req_ctx->last = 1;
2104
2105 return ahash_process_req(areq, areq->nbytes);
2106 }
2107
2108 static int ahash_export(struct ahash_request *areq, void *out)
2109 {
2110 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2111 struct talitos_export_state *export = out;
2112
2113 memcpy(export->hw_context, req_ctx->hw_context,
2114 req_ctx->hw_context_size);
2115 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2116 export->swinit = req_ctx->swinit;
2117 export->first = req_ctx->first;
2118 export->last = req_ctx->last;
2119 export->to_hash_later = req_ctx->to_hash_later;
2120 export->nbuf = req_ctx->nbuf;
2121
2122 return 0;
2123 }
2124
2125 static int ahash_import(struct ahash_request *areq, const void *in)
2126 {
2127 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2128 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2129 const struct talitos_export_state *export = in;
2130 unsigned int size;
2131
2132 memset(req_ctx, 0, sizeof(*req_ctx));
2133 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2134 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2135 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2136 req_ctx->hw_context_size = size;
2137 memcpy(req_ctx->hw_context, export->hw_context, size);
2138 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2139 req_ctx->swinit = export->swinit;
2140 req_ctx->first = export->first;
2141 req_ctx->last = export->last;
2142 req_ctx->to_hash_later = export->to_hash_later;
2143 req_ctx->nbuf = export->nbuf;
2144
2145 return 0;
2146 }
2147
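/*
* Per RFC 2104, HMAC keys longer than the block size are first
* shortened to their digest. keyhash() computes that digest
* synchronously on this same device, with ctx->keylen forced to 0 so
* the operation runs as a plain hash rather than an HMAC.
*/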
2148 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2149 u8 *hash)
2150 {
2151 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2153 struct scatterlist sg[1];
2154 struct ahash_request *req;
2155 struct crypto_wait wait;
2156 int ret;
2157
2158 crypto_init_wait(&wait);
2159
2160 req = ahash_request_alloc(tfm, GFP_KERNEL);
2161 if (!req)
2162 return -ENOMEM;
2163
2164 /* Keep tfm keylen == 0 during hash of the long key */
2165 ctx->keylen = 0;
2166 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2167 crypto_req_done, &wait);
2168
2169 sg_init_one(&sg[0], key, keylen);
2170
2171 ahash_request_set_crypt(req, sg, hash, keylen);
2172 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2173
2174 ahash_request_free(req);
2175
2176 return ret;
2177 }
2178
2179 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2180 unsigned int keylen)
2181 {
2182 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2183 struct device *dev = ctx->dev;
2184 unsigned int blocksize =
2185 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2186 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2187 unsigned int keysize = keylen;
2188 u8 hash[SHA512_DIGEST_SIZE];
2189 int ret;
2190
2191 if (keylen <= blocksize)
2192 memcpy(ctx->key, key, keysize);
2193 else {
2194 /* Must get the hash of the long key */
2195 ret = keyhash(tfm, key, keylen, hash);
2196
2197 if (ret) {
2198 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2199 return -EINVAL;
2200 }
2201
2202 keysize = digestsize;
2203 memcpy(ctx->key, hash, digestsize);
2204 }
2205
2206 if (ctx->keylen)
2207 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2208
2209 ctx->keylen = keysize;
2210 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2211
2212 return 0;
2213 }
2214
2215
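/*
* One template per algorithm: 'type' selects which member of the
* 'alg' union is live, and a 'priority' of 0 means talitos_alg_alloc()
* falls back to the default TALITOS_CRA_PRIORITY.
*/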
2216 struct talitos_alg_template {
2217 u32 type;
2218 u32 priority;
2219 union {
2220 struct crypto_alg crypto;
2221 struct ahash_alg hash;
2222 struct aead_alg aead;
2223 } alg;
2224 __be32 desc_hdr_template;
2225 };
2226
2227 static struct talitos_alg_template driver_algs[] = {
2228 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
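/*
* Most modes appear twice below: once as single-pass IPSEC_ESP, and
* once as the HMAC_SNOOP_NO_AFEU variant at its own
* TALITOS_CRA_PRIORITY_AEAD_HSNA priority; hw_supports() later filters
* out whichever descriptor type the device does not advertise.
*/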
2229 { .type = CRYPTO_ALG_TYPE_AEAD,
2230 .alg.aead = {
2231 .base = {
2232 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2233 .cra_driver_name = "authenc-hmac-sha1-"
2234 "cbc-aes-talitos",
2235 .cra_blocksize = AES_BLOCK_SIZE,
2236 .cra_flags = CRYPTO_ALG_ASYNC,
2237 },
2238 .ivsize = AES_BLOCK_SIZE,
2239 .maxauthsize = SHA1_DIGEST_SIZE,
2240 },
2241 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2242 DESC_HDR_SEL0_AESU |
2243 DESC_HDR_MODE0_AESU_CBC |
2244 DESC_HDR_SEL1_MDEUA |
2245 DESC_HDR_MODE1_MDEU_INIT |
2246 DESC_HDR_MODE1_MDEU_PAD |
2247 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2248 },
2249 { .type = CRYPTO_ALG_TYPE_AEAD,
2250 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2251 .alg.aead = {
2252 .base = {
2253 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2254 .cra_driver_name = "authenc-hmac-sha1-"
2255 "cbc-aes-talitos",
2256 .cra_blocksize = AES_BLOCK_SIZE,
2257 .cra_flags = CRYPTO_ALG_ASYNC,
2258 },
2259 .ivsize = AES_BLOCK_SIZE,
2260 .maxauthsize = SHA1_DIGEST_SIZE,
2261 },
2262 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2263 DESC_HDR_SEL0_AESU |
2264 DESC_HDR_MODE0_AESU_CBC |
2265 DESC_HDR_SEL1_MDEUA |
2266 DESC_HDR_MODE1_MDEU_INIT |
2267 DESC_HDR_MODE1_MDEU_PAD |
2268 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2269 },
2270 { .type = CRYPTO_ALG_TYPE_AEAD,
2271 .alg.aead = {
2272 .base = {
2273 .cra_name = "authenc(hmac(sha1),"
2274 "cbc(des3_ede))",
2275 .cra_driver_name = "authenc-hmac-sha1-"
2276 "cbc-3des-talitos",
2277 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2278 .cra_flags = CRYPTO_ALG_ASYNC,
2279 },
2280 .ivsize = DES3_EDE_BLOCK_SIZE,
2281 .maxauthsize = SHA1_DIGEST_SIZE,
2282 },
2283 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2284 DESC_HDR_SEL0_DEU |
2285 DESC_HDR_MODE0_DEU_CBC |
2286 DESC_HDR_MODE0_DEU_3DES |
2287 DESC_HDR_SEL1_MDEUA |
2288 DESC_HDR_MODE1_MDEU_INIT |
2289 DESC_HDR_MODE1_MDEU_PAD |
2290 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2291 },
2292 { .type = CRYPTO_ALG_TYPE_AEAD,
2293 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2294 .alg.aead = {
2295 .base = {
2296 .cra_name = "authenc(hmac(sha1),"
2297 "cbc(des3_ede))",
2298 .cra_driver_name = "authenc-hmac-sha1-"
2299 "cbc-3des-talitos",
2300 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2301 .cra_flags = CRYPTO_ALG_ASYNC,
2302 },
2303 .ivsize = DES3_EDE_BLOCK_SIZE,
2304 .maxauthsize = SHA1_DIGEST_SIZE,
2305 },
2306 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2307 DESC_HDR_SEL0_DEU |
2308 DESC_HDR_MODE0_DEU_CBC |
2309 DESC_HDR_MODE0_DEU_3DES |
2310 DESC_HDR_SEL1_MDEUA |
2311 DESC_HDR_MODE1_MDEU_INIT |
2312 DESC_HDR_MODE1_MDEU_PAD |
2313 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2314 },
2315 { .type = CRYPTO_ALG_TYPE_AEAD,
2316 .alg.aead = {
2317 .base = {
2318 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2319 .cra_driver_name = "authenc-hmac-sha224-"
2320 "cbc-aes-talitos",
2321 .cra_blocksize = AES_BLOCK_SIZE,
2322 .cra_flags = CRYPTO_ALG_ASYNC,
2323 },
2324 .ivsize = AES_BLOCK_SIZE,
2325 .maxauthsize = SHA224_DIGEST_SIZE,
2326 },
2327 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2328 DESC_HDR_SEL0_AESU |
2329 DESC_HDR_MODE0_AESU_CBC |
2330 DESC_HDR_SEL1_MDEUA |
2331 DESC_HDR_MODE1_MDEU_INIT |
2332 DESC_HDR_MODE1_MDEU_PAD |
2333 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2334 },
2335 { .type = CRYPTO_ALG_TYPE_AEAD,
2336 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2337 .alg.aead = {
2338 .base = {
2339 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2340 .cra_driver_name = "authenc-hmac-sha224-"
2341 "cbc-aes-talitos",
2342 .cra_blocksize = AES_BLOCK_SIZE,
2343 .cra_flags = CRYPTO_ALG_ASYNC,
2344 },
2345 .ivsize = AES_BLOCK_SIZE,
2346 .maxauthsize = SHA224_DIGEST_SIZE,
2347 },
2348 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2349 DESC_HDR_SEL0_AESU |
2350 DESC_HDR_MODE0_AESU_CBC |
2351 DESC_HDR_SEL1_MDEUA |
2352 DESC_HDR_MODE1_MDEU_INIT |
2353 DESC_HDR_MODE1_MDEU_PAD |
2354 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2355 },
2356 { .type = CRYPTO_ALG_TYPE_AEAD,
2357 .alg.aead = {
2358 .base = {
2359 .cra_name = "authenc(hmac(sha224),"
2360 "cbc(des3_ede))",
2361 .cra_driver_name = "authenc-hmac-sha224-"
2362 "cbc-3des-talitos",
2363 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2364 .cra_flags = CRYPTO_ALG_ASYNC,
2365 },
2366 .ivsize = DES3_EDE_BLOCK_SIZE,
2367 .maxauthsize = SHA224_DIGEST_SIZE,
2368 },
2369 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2370 DESC_HDR_SEL0_DEU |
2371 DESC_HDR_MODE0_DEU_CBC |
2372 DESC_HDR_MODE0_DEU_3DES |
2373 DESC_HDR_SEL1_MDEUA |
2374 DESC_HDR_MODE1_MDEU_INIT |
2375 DESC_HDR_MODE1_MDEU_PAD |
2376 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2377 },
2378 { .type = CRYPTO_ALG_TYPE_AEAD,
2379 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2380 .alg.aead = {
2381 .base = {
2382 .cra_name = "authenc(hmac(sha224),"
2383 "cbc(des3_ede))",
2384 .cra_driver_name = "authenc-hmac-sha224-"
2385 "cbc-3des-talitos",
2386 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2387 .cra_flags = CRYPTO_ALG_ASYNC,
2388 },
2389 .ivsize = DES3_EDE_BLOCK_SIZE,
2390 .maxauthsize = SHA224_DIGEST_SIZE,
2391 },
2392 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2393 DESC_HDR_SEL0_DEU |
2394 DESC_HDR_MODE0_DEU_CBC |
2395 DESC_HDR_MODE0_DEU_3DES |
2396 DESC_HDR_SEL1_MDEUA |
2397 DESC_HDR_MODE1_MDEU_INIT |
2398 DESC_HDR_MODE1_MDEU_PAD |
2399 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2400 },
2401 { .type = CRYPTO_ALG_TYPE_AEAD,
2402 .alg.aead = {
2403 .base = {
2404 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2405 .cra_driver_name = "authenc-hmac-sha256-"
2406 "cbc-aes-talitos",
2407 .cra_blocksize = AES_BLOCK_SIZE,
2408 .cra_flags = CRYPTO_ALG_ASYNC,
2409 },
2410 .ivsize = AES_BLOCK_SIZE,
2411 .maxauthsize = SHA256_DIGEST_SIZE,
2412 },
2413 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2414 DESC_HDR_SEL0_AESU |
2415 DESC_HDR_MODE0_AESU_CBC |
2416 DESC_HDR_SEL1_MDEUA |
2417 DESC_HDR_MODE1_MDEU_INIT |
2418 DESC_HDR_MODE1_MDEU_PAD |
2419 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2420 },
2421 { .type = CRYPTO_ALG_TYPE_AEAD,
2422 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2423 .alg.aead = {
2424 .base = {
2425 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2426 .cra_driver_name = "authenc-hmac-sha256-"
2427 "cbc-aes-talitos",
2428 .cra_blocksize = AES_BLOCK_SIZE,
2429 .cra_flags = CRYPTO_ALG_ASYNC,
2430 },
2431 .ivsize = AES_BLOCK_SIZE,
2432 .maxauthsize = SHA256_DIGEST_SIZE,
2433 },
2434 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2435 DESC_HDR_SEL0_AESU |
2436 DESC_HDR_MODE0_AESU_CBC |
2437 DESC_HDR_SEL1_MDEUA |
2438 DESC_HDR_MODE1_MDEU_INIT |
2439 DESC_HDR_MODE1_MDEU_PAD |
2440 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2441 },
2442 { .type = CRYPTO_ALG_TYPE_AEAD,
2443 .alg.aead = {
2444 .base = {
2445 .cra_name = "authenc(hmac(sha256),"
2446 "cbc(des3_ede))",
2447 .cra_driver_name = "authenc-hmac-sha256-"
2448 "cbc-3des-talitos",
2449 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2450 .cra_flags = CRYPTO_ALG_ASYNC,
2451 },
2452 .ivsize = DES3_EDE_BLOCK_SIZE,
2453 .maxauthsize = SHA256_DIGEST_SIZE,
2454 },
2455 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2456 DESC_HDR_SEL0_DEU |
2457 DESC_HDR_MODE0_DEU_CBC |
2458 DESC_HDR_MODE0_DEU_3DES |
2459 DESC_HDR_SEL1_MDEUA |
2460 DESC_HDR_MODE1_MDEU_INIT |
2461 DESC_HDR_MODE1_MDEU_PAD |
2462 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2463 },
2464 { .type = CRYPTO_ALG_TYPE_AEAD,
2465 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2466 .alg.aead = {
2467 .base = {
2468 .cra_name = "authenc(hmac(sha256),"
2469 "cbc(des3_ede))",
2470 .cra_driver_name = "authenc-hmac-sha256-"
2471 "cbc-3des-talitos",
2472 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2473 .cra_flags = CRYPTO_ALG_ASYNC,
2474 },
2475 .ivsize = DES3_EDE_BLOCK_SIZE,
2476 .maxauthsize = SHA256_DIGEST_SIZE,
2477 },
2478 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2479 DESC_HDR_SEL0_DEU |
2480 DESC_HDR_MODE0_DEU_CBC |
2481 DESC_HDR_MODE0_DEU_3DES |
2482 DESC_HDR_SEL1_MDEUA |
2483 DESC_HDR_MODE1_MDEU_INIT |
2484 DESC_HDR_MODE1_MDEU_PAD |
2485 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2486 },
2487 { .type = CRYPTO_ALG_TYPE_AEAD,
2488 .alg.aead = {
2489 .base = {
2490 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2491 .cra_driver_name = "authenc-hmac-sha384-"
2492 "cbc-aes-talitos",
2493 .cra_blocksize = AES_BLOCK_SIZE,
2494 .cra_flags = CRYPTO_ALG_ASYNC,
2495 },
2496 .ivsize = AES_BLOCK_SIZE,
2497 .maxauthsize = SHA384_DIGEST_SIZE,
2498 },
2499 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2500 DESC_HDR_SEL0_AESU |
2501 DESC_HDR_MODE0_AESU_CBC |
2502 DESC_HDR_SEL1_MDEUB |
2503 DESC_HDR_MODE1_MDEU_INIT |
2504 DESC_HDR_MODE1_MDEU_PAD |
2505 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2506 },
2507 { .type = CRYPTO_ALG_TYPE_AEAD,
2508 .alg.aead = {
2509 .base = {
2510 .cra_name = "authenc(hmac(sha384),"
2511 "cbc(des3_ede))",
2512 .cra_driver_name = "authenc-hmac-sha384-"
2513 "cbc-3des-talitos",
2514 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2515 .cra_flags = CRYPTO_ALG_ASYNC,
2516 },
2517 .ivsize = DES3_EDE_BLOCK_SIZE,
2518 .maxauthsize = SHA384_DIGEST_SIZE,
2519 },
2520 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2521 DESC_HDR_SEL0_DEU |
2522 DESC_HDR_MODE0_DEU_CBC |
2523 DESC_HDR_MODE0_DEU_3DES |
2524 DESC_HDR_SEL1_MDEUB |
2525 DESC_HDR_MODE1_MDEU_INIT |
2526 DESC_HDR_MODE1_MDEU_PAD |
2527 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2528 },
2529 { .type = CRYPTO_ALG_TYPE_AEAD,
2530 .alg.aead = {
2531 .base = {
2532 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2533 .cra_driver_name = "authenc-hmac-sha512-"
2534 "cbc-aes-talitos",
2535 .cra_blocksize = AES_BLOCK_SIZE,
2536 .cra_flags = CRYPTO_ALG_ASYNC,
2537 },
2538 .ivsize = AES_BLOCK_SIZE,
2539 .maxauthsize = SHA512_DIGEST_SIZE,
2540 },
2541 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542 DESC_HDR_SEL0_AESU |
2543 DESC_HDR_MODE0_AESU_CBC |
2544 DESC_HDR_SEL1_MDEUB |
2545 DESC_HDR_MODE1_MDEU_INIT |
2546 DESC_HDR_MODE1_MDEU_PAD |
2547 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2548 },
2549 { .type = CRYPTO_ALG_TYPE_AEAD,
2550 .alg.aead = {
2551 .base = {
2552 .cra_name = "authenc(hmac(sha512),"
2553 "cbc(des3_ede))",
2554 .cra_driver_name = "authenc-hmac-sha512-"
2555 "cbc-3des-talitos",
2556 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2557 .cra_flags = CRYPTO_ALG_ASYNC,
2558 },
2559 .ivsize = DES3_EDE_BLOCK_SIZE,
2560 .maxauthsize = SHA512_DIGEST_SIZE,
2561 },
2562 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2563 DESC_HDR_SEL0_DEU |
2564 DESC_HDR_MODE0_DEU_CBC |
2565 DESC_HDR_MODE0_DEU_3DES |
2566 DESC_HDR_SEL1_MDEUB |
2567 DESC_HDR_MODE1_MDEU_INIT |
2568 DESC_HDR_MODE1_MDEU_PAD |
2569 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2570 },
2571 { .type = CRYPTO_ALG_TYPE_AEAD,
2572 .alg.aead = {
2573 .base = {
2574 .cra_name = "authenc(hmac(md5),cbc(aes))",
2575 .cra_driver_name = "authenc-hmac-md5-"
2576 "cbc-aes-talitos",
2577 .cra_blocksize = AES_BLOCK_SIZE,
2578 .cra_flags = CRYPTO_ALG_ASYNC,
2579 },
2580 .ivsize = AES_BLOCK_SIZE,
2581 .maxauthsize = MD5_DIGEST_SIZE,
2582 },
2583 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2584 DESC_HDR_SEL0_AESU |
2585 DESC_HDR_MODE0_AESU_CBC |
2586 DESC_HDR_SEL1_MDEUA |
2587 DESC_HDR_MODE1_MDEU_INIT |
2588 DESC_HDR_MODE1_MDEU_PAD |
2589 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2590 },
2591 { .type = CRYPTO_ALG_TYPE_AEAD,
2592 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2593 .alg.aead = {
2594 .base = {
2595 .cra_name = "authenc(hmac(md5),cbc(aes))",
2596 .cra_driver_name = "authenc-hmac-md5-"
2597 "cbc-aes-talitos",
2598 .cra_blocksize = AES_BLOCK_SIZE,
2599 .cra_flags = CRYPTO_ALG_ASYNC,
2600 },
2601 .ivsize = AES_BLOCK_SIZE,
2602 .maxauthsize = MD5_DIGEST_SIZE,
2603 },
2604 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2605 DESC_HDR_SEL0_AESU |
2606 DESC_HDR_MODE0_AESU_CBC |
2607 DESC_HDR_SEL1_MDEUA |
2608 DESC_HDR_MODE1_MDEU_INIT |
2609 DESC_HDR_MODE1_MDEU_PAD |
2610 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2611 },
2612 { .type = CRYPTO_ALG_TYPE_AEAD,
2613 .alg.aead = {
2614 .base = {
2615 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2616 .cra_driver_name = "authenc-hmac-md5-"
2617 "cbc-3des-talitos",
2618 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2619 .cra_flags = CRYPTO_ALG_ASYNC,
2620 },
2621 .ivsize = DES3_EDE_BLOCK_SIZE,
2622 .maxauthsize = MD5_DIGEST_SIZE,
2623 },
2624 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2625 DESC_HDR_SEL0_DEU |
2626 DESC_HDR_MODE0_DEU_CBC |
2627 DESC_HDR_MODE0_DEU_3DES |
2628 DESC_HDR_SEL1_MDEUA |
2629 DESC_HDR_MODE1_MDEU_INIT |
2630 DESC_HDR_MODE1_MDEU_PAD |
2631 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2632 },
2633 { .type = CRYPTO_ALG_TYPE_AEAD,
2634 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2635 .alg.aead = {
2636 .base = {
2637 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2638 .cra_driver_name = "authenc-hmac-md5-"
2639 "cbc-3des-talitos",
2640 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2641 .cra_flags = CRYPTO_ALG_ASYNC,
2642 },
2643 .ivsize = DES3_EDE_BLOCK_SIZE,
2644 .maxauthsize = MD5_DIGEST_SIZE,
2645 },
2646 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2647 DESC_HDR_SEL0_DEU |
2648 DESC_HDR_MODE0_DEU_CBC |
2649 DESC_HDR_MODE0_DEU_3DES |
2650 DESC_HDR_SEL1_MDEUA |
2651 DESC_HDR_MODE1_MDEU_INIT |
2652 DESC_HDR_MODE1_MDEU_PAD |
2653 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2654 },
2655 /* ABLKCIPHER algorithms. */
2656 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2657 .alg.crypto = {
2658 .cra_name = "ecb(aes)",
2659 .cra_driver_name = "ecb-aes-talitos",
2660 .cra_blocksize = AES_BLOCK_SIZE,
2661 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2662 CRYPTO_ALG_ASYNC,
2663 .cra_ablkcipher = {
2664 .min_keysize = AES_MIN_KEY_SIZE,
2665 .max_keysize = AES_MAX_KEY_SIZE,
2666 .ivsize = AES_BLOCK_SIZE,
2667 }
2668 },
2669 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2670 DESC_HDR_SEL0_AESU,
2671 },
2672 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2673 .alg.crypto = {
2674 .cra_name = "cbc(aes)",
2675 .cra_driver_name = "cbc-aes-talitos",
2676 .cra_blocksize = AES_BLOCK_SIZE,
2677 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2678 CRYPTO_ALG_ASYNC,
2679 .cra_ablkcipher = {
2680 .min_keysize = AES_MIN_KEY_SIZE,
2681 .max_keysize = AES_MAX_KEY_SIZE,
2682 .ivsize = AES_BLOCK_SIZE,
2683 }
2684 },
2685 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2686 DESC_HDR_SEL0_AESU |
2687 DESC_HDR_MODE0_AESU_CBC,
2688 },
2689 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2690 .alg.crypto = {
2691 .cra_name = "ctr(aes)",
2692 .cra_driver_name = "ctr-aes-talitos",
2693 .cra_blocksize = AES_BLOCK_SIZE,
2694 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2695 CRYPTO_ALG_ASYNC,
2696 .cra_ablkcipher = {
2697 .min_keysize = AES_MIN_KEY_SIZE,
2698 .max_keysize = AES_MAX_KEY_SIZE,
2699 .ivsize = AES_BLOCK_SIZE,
2700 }
2701 },
2702 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2703 DESC_HDR_SEL0_AESU |
2704 DESC_HDR_MODE0_AESU_CTR,
2705 },
2706 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2707 .alg.crypto = {
2708 .cra_name = "ecb(des)",
2709 .cra_driver_name = "ecb-des-talitos",
2710 .cra_blocksize = DES_BLOCK_SIZE,
2711 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2712 CRYPTO_ALG_ASYNC,
2713 .cra_ablkcipher = {
2714 .min_keysize = DES_KEY_SIZE,
2715 .max_keysize = DES_KEY_SIZE,
2716 .ivsize = DES_BLOCK_SIZE,
2717 }
2718 },
2719 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2720 DESC_HDR_SEL0_DEU,
2721 },
2722 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2723 .alg.crypto = {
2724 .cra_name = "cbc(des)",
2725 .cra_driver_name = "cbc-des-talitos",
2726 .cra_blocksize = DES_BLOCK_SIZE,
2727 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2728 CRYPTO_ALG_ASYNC,
2729 .cra_ablkcipher = {
2730 .min_keysize = DES_KEY_SIZE,
2731 .max_keysize = DES_KEY_SIZE,
2732 .ivsize = DES_BLOCK_SIZE,
2733 }
2734 },
2735 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2736 DESC_HDR_SEL0_DEU |
2737 DESC_HDR_MODE0_DEU_CBC,
2738 },
2739 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2740 .alg.crypto = {
2741 .cra_name = "ecb(des3_ede)",
2742 .cra_driver_name = "ecb-3des-talitos",
2743 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2744 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2745 CRYPTO_ALG_ASYNC,
2746 .cra_ablkcipher = {
2747 .min_keysize = DES3_EDE_KEY_SIZE,
2748 .max_keysize = DES3_EDE_KEY_SIZE,
2749 .ivsize = DES3_EDE_BLOCK_SIZE,
2750 }
2751 },
2752 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2753 DESC_HDR_SEL0_DEU |
2754 DESC_HDR_MODE0_DEU_3DES,
2755 },
2756 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2757 .alg.crypto = {
2758 .cra_name = "cbc(des3_ede)",
2759 .cra_driver_name = "cbc-3des-talitos",
2760 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2761 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2762 CRYPTO_ALG_ASYNC,
2763 .cra_ablkcipher = {
2764 .min_keysize = DES3_EDE_KEY_SIZE,
2765 .max_keysize = DES3_EDE_KEY_SIZE,
2766 .ivsize = DES3_EDE_BLOCK_SIZE,
2767 }
2768 },
2769 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2770 DESC_HDR_SEL0_DEU |
2771 DESC_HDR_MODE0_DEU_CBC |
2772 DESC_HDR_MODE0_DEU_3DES,
2773 },
2774 /* AHASH algorithms. */
2775 { .type = CRYPTO_ALG_TYPE_AHASH,
2776 .alg.hash = {
2777 .halg.digestsize = MD5_DIGEST_SIZE,
2778 .halg.statesize = sizeof(struct talitos_export_state),
2779 .halg.base = {
2780 .cra_name = "md5",
2781 .cra_driver_name = "md5-talitos",
2782 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2783 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2784 CRYPTO_ALG_ASYNC,
2785 }
2786 },
2787 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2788 DESC_HDR_SEL0_MDEUA |
2789 DESC_HDR_MODE0_MDEU_MD5,
2790 },
2791 { .type = CRYPTO_ALG_TYPE_AHASH,
2792 .alg.hash = {
2793 .halg.digestsize = SHA1_DIGEST_SIZE,
2794 .halg.statesize = sizeof(struct talitos_export_state),
2795 .halg.base = {
2796 .cra_name = "sha1",
2797 .cra_driver_name = "sha1-talitos",
2798 .cra_blocksize = SHA1_BLOCK_SIZE,
2799 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2800 CRYPTO_ALG_ASYNC,
2801 }
2802 },
2803 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2804 DESC_HDR_SEL0_MDEUA |
2805 DESC_HDR_MODE0_MDEU_SHA1,
2806 },
2807 { .type = CRYPTO_ALG_TYPE_AHASH,
2808 .alg.hash = {
2809 .halg.digestsize = SHA224_DIGEST_SIZE,
2810 .halg.statesize = sizeof(struct talitos_export_state),
2811 .halg.base = {
2812 .cra_name = "sha224",
2813 .cra_driver_name = "sha224-talitos",
2814 .cra_blocksize = SHA224_BLOCK_SIZE,
2815 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2816 CRYPTO_ALG_ASYNC,
2817 }
2818 },
2819 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2820 DESC_HDR_SEL0_MDEUA |
2821 DESC_HDR_MODE0_MDEU_SHA224,
2822 },
2823 { .type = CRYPTO_ALG_TYPE_AHASH,
2824 .alg.hash = {
2825 .halg.digestsize = SHA256_DIGEST_SIZE,
2826 .halg.statesize = sizeof(struct talitos_export_state),
2827 .halg.base = {
2828 .cra_name = "sha256",
2829 .cra_driver_name = "sha256-talitos",
2830 .cra_blocksize = SHA256_BLOCK_SIZE,
2831 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2832 CRYPTO_ALG_ASYNC,
2833 }
2834 },
2835 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2836 DESC_HDR_SEL0_MDEUA |
2837 DESC_HDR_MODE0_MDEU_SHA256,
2838 },
2839 { .type = CRYPTO_ALG_TYPE_AHASH,
2840 .alg.hash = {
2841 .halg.digestsize = SHA384_DIGEST_SIZE,
2842 .halg.statesize = sizeof(struct talitos_export_state),
2843 .halg.base = {
2844 .cra_name = "sha384",
2845 .cra_driver_name = "sha384-talitos",
2846 .cra_blocksize = SHA384_BLOCK_SIZE,
2847 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2848 CRYPTO_ALG_ASYNC,
2849 }
2850 },
2851 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2852 DESC_HDR_SEL0_MDEUB |
2853 DESC_HDR_MODE0_MDEUB_SHA384,
2854 },
2855 { .type = CRYPTO_ALG_TYPE_AHASH,
2856 .alg.hash = {
2857 .halg.digestsize = SHA512_DIGEST_SIZE,
2858 .halg.statesize = sizeof(struct talitos_export_state),
2859 .halg.base = {
2860 .cra_name = "sha512",
2861 .cra_driver_name = "sha512-talitos",
2862 .cra_blocksize = SHA512_BLOCK_SIZE,
2863 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2864 CRYPTO_ALG_ASYNC,
2865 }
2866 },
2867 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2868 DESC_HDR_SEL0_MDEUB |
2869 DESC_HDR_MODE0_MDEUB_SHA512,
2870 },
2871 { .type = CRYPTO_ALG_TYPE_AHASH,
2872 .alg.hash = {
2873 .halg.digestsize = MD5_DIGEST_SIZE,
2874 .halg.statesize = sizeof(struct talitos_export_state),
2875 .halg.base = {
2876 .cra_name = "hmac(md5)",
2877 .cra_driver_name = "hmac-md5-talitos",
2878 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2879 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2880 CRYPTO_ALG_ASYNC,
2881 }
2882 },
2883 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2884 DESC_HDR_SEL0_MDEUA |
2885 DESC_HDR_MODE0_MDEU_MD5,
2886 },
2887 { .type = CRYPTO_ALG_TYPE_AHASH,
2888 .alg.hash = {
2889 .halg.digestsize = SHA1_DIGEST_SIZE,
2890 .halg.statesize = sizeof(struct talitos_export_state),
2891 .halg.base = {
2892 .cra_name = "hmac(sha1)",
2893 .cra_driver_name = "hmac-sha1-talitos",
2894 .cra_blocksize = SHA1_BLOCK_SIZE,
2895 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2896 CRYPTO_ALG_ASYNC,
2897 }
2898 },
2899 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2900 DESC_HDR_SEL0_MDEUA |
2901 DESC_HDR_MODE0_MDEU_SHA1,
2902 },
2903 { .type = CRYPTO_ALG_TYPE_AHASH,
2904 .alg.hash = {
2905 .halg.digestsize = SHA224_DIGEST_SIZE,
2906 .halg.statesize = sizeof(struct talitos_export_state),
2907 .halg.base = {
2908 .cra_name = "hmac(sha224)",
2909 .cra_driver_name = "hmac-sha224-talitos",
2910 .cra_blocksize = SHA224_BLOCK_SIZE,
2911 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2912 CRYPTO_ALG_ASYNC,
2913 }
2914 },
2915 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2916 DESC_HDR_SEL0_MDEUA |
2917 DESC_HDR_MODE0_MDEU_SHA224,
2918 },
2919 { .type = CRYPTO_ALG_TYPE_AHASH,
2920 .alg.hash = {
2921 .halg.digestsize = SHA256_DIGEST_SIZE,
2922 .halg.statesize = sizeof(struct talitos_export_state),
2923 .halg.base = {
2924 .cra_name = "hmac(sha256)",
2925 .cra_driver_name = "hmac-sha256-talitos",
2926 .cra_blocksize = SHA256_BLOCK_SIZE,
2927 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2928 CRYPTO_ALG_ASYNC,
2929 }
2930 },
2931 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2932 DESC_HDR_SEL0_MDEUA |
2933 DESC_HDR_MODE0_MDEU_SHA256,
2934 },
2935 { .type = CRYPTO_ALG_TYPE_AHASH,
2936 .alg.hash = {
2937 .halg.digestsize = SHA384_DIGEST_SIZE,
2938 .halg.statesize = sizeof(struct talitos_export_state),
2939 .halg.base = {
2940 .cra_name = "hmac(sha384)",
2941 .cra_driver_name = "hmac-sha384-talitos",
2942 .cra_blocksize = SHA384_BLOCK_SIZE,
2943 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2944 CRYPTO_ALG_ASYNC,
2945 }
2946 },
2947 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2948 DESC_HDR_SEL0_MDEUB |
2949 DESC_HDR_MODE0_MDEUB_SHA384,
2950 },
2951 { .type = CRYPTO_ALG_TYPE_AHASH,
2952 .alg.hash = {
2953 .halg.digestsize = SHA512_DIGEST_SIZE,
2954 .halg.statesize = sizeof(struct talitos_export_state),
2955 .halg.base = {
2956 .cra_name = "hmac(sha512)",
2957 .cra_driver_name = "hmac-sha512-talitos",
2958 .cra_blocksize = SHA512_BLOCK_SIZE,
2959 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2960 CRYPTO_ALG_ASYNC,
2961 }
2962 },
2963 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2964 DESC_HDR_SEL0_MDEUB |
2965 DESC_HDR_MODE0_MDEUB_SHA512,
2966 }
2967 };
2968
2969 struct talitos_crypto_alg {
2970 struct list_head entry;
2971 struct device *dev;
2972 struct talitos_alg_template algt;
2973 };
2974
2975 static int talitos_init_common(struct talitos_ctx *ctx,
2976 struct talitos_crypto_alg *talitos_alg)
2977 {
2978 struct talitos_private *priv;
2979
2980 /* update context with ptr to dev */
2981 ctx->dev = talitos_alg->dev;
2982
2983 /* assign SEC channel to tfm in round-robin fashion */
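/* the mask acts as a modulo: talitos_probe() rejects channel counts that are not a power of 2 */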
2984 priv = dev_get_drvdata(ctx->dev);
2985 ctx->ch = atomic_inc_return(&priv->last_chan) &
2986 (priv->num_channels - 1);
2987
2988 /* copy descriptor header template value */
2989 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2990
2991 /* select done notification */
2992 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2993
2994 return 0;
2995 }
2996
2997 static int talitos_cra_init(struct crypto_tfm *tfm)
2998 {
2999 struct crypto_alg *alg = tfm->__crt_alg;
3000 struct talitos_crypto_alg *talitos_alg;
3001 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3002
3003 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3004 talitos_alg = container_of(__crypto_ahash_alg(alg),
3005 struct talitos_crypto_alg,
3006 algt.alg.hash);
3007 else
3008 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3009 algt.alg.crypto);
3010
3011 return talitos_init_common(ctx, talitos_alg);
3012 }
3013
3014 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3015 {
3016 struct aead_alg *alg = crypto_aead_alg(tfm);
3017 struct talitos_crypto_alg *talitos_alg;
3018 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3019
3020 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3021 algt.alg.aead);
3022
3023 return talitos_init_common(ctx, talitos_alg);
3024 }
3025
3026 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3027 {
3028 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3029
3030 talitos_cra_init(tfm);
3031
3032 ctx->keylen = 0;
3033 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3034 sizeof(struct talitos_ahash_req_ctx));
3035
3036 return 0;
3037 }
3038
3039 static void talitos_cra_exit(struct crypto_tfm *tfm)
3040 {
3041 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3042 struct device *dev = ctx->dev;
3043
3044 if (ctx->keylen)
3045 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3046 }
3047
3048 /*
3049 * given the alg's descriptor header template, determine whether descriptor
3050 * type and primary/secondary execution units required match the hw
3051 * capabilities description provided in the device tree node.
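* For example, the plain sha256 ahash template requires the common
* non-snoop descriptor type bit in "fsl,descriptor-types-mask" and
* the MDEU-A bit in "fsl,exec-units-mask"; AEAD templates are also
* checked against their secondary (authentication) execution unit.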
3052 */
3053 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3054 {
3055 struct talitos_private *priv = dev_get_drvdata(dev);
3056 int ret;
3057
3058 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3059 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3060
3061 if (SECONDARY_EU(desc_hdr_template))
3062 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3063 & priv->exec_units);
3064
3065 return ret;
3066 }
3067
3068 static int talitos_remove(struct platform_device *ofdev)
3069 {
3070 struct device *dev = &ofdev->dev;
3071 struct talitos_private *priv = dev_get_drvdata(dev);
3072 struct talitos_crypto_alg *t_alg, *n;
3073 int i;
3074
3075 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3076 switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
crypto_unregister_alg(&t_alg->algt.alg.crypto);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
break;
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&t_alg->algt.alg.hash);
break;
3084 }
3085 list_del(&t_alg->entry);
3086 }
3087
3088 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3089 talitos_unregister_rng(dev);
3090
3091 for (i = 0; i < 2; i++)
3092 if (priv->irq[i]) {
3093 free_irq(priv->irq[i], dev);
3094 irq_dispose_mapping(priv->irq[i]);
3095 }
3096
3097 tasklet_kill(&priv->done_task[0]);
3098 if (priv->irq[1])
3099 tasklet_kill(&priv->done_task[1]);
3100
3101 return 0;
3102 }
3103
3104 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3105 struct talitos_alg_template
3106 *template)
3107 {
3108 struct talitos_private *priv = dev_get_drvdata(dev);
3109 struct talitos_crypto_alg *t_alg;
3110 struct crypto_alg *alg;
3111
3112 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3113 GFP_KERNEL);
3114 if (!t_alg)
3115 return ERR_PTR(-ENOMEM);
3116
3117 t_alg->algt = *template;
3118
3119 switch (t_alg->algt.type) {
3120 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3121 alg = &t_alg->algt.alg.crypto;
3122 alg->cra_init = talitos_cra_init;
3123 alg->cra_exit = talitos_cra_exit;
3124 alg->cra_type = &crypto_ablkcipher_type;
3125 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3126 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3127 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3128 alg->cra_ablkcipher.geniv = "eseqiv";
3129 break;
3130 case CRYPTO_ALG_TYPE_AEAD:
3131 alg = &t_alg->algt.alg.aead.base;
3132 alg->cra_exit = talitos_cra_exit;
3133 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3134 t_alg->algt.alg.aead.setkey = aead_setkey;
3135 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3136 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3137 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3138 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3139 devm_kfree(dev, t_alg);
3140 return ERR_PTR(-ENOTSUPP);
3141 }
3142 break;
3143 case CRYPTO_ALG_TYPE_AHASH:
3144 alg = &t_alg->algt.alg.hash.halg.base;
3145 alg->cra_init = talitos_cra_init_ahash;
3146 alg->cra_exit = talitos_cra_exit;
3147 alg->cra_type = &crypto_ahash_type;
3148 t_alg->algt.alg.hash.init = ahash_init;
3149 t_alg->algt.alg.hash.update = ahash_update;
3150 t_alg->algt.alg.hash.final = ahash_final;
3151 t_alg->algt.alg.hash.finup = ahash_finup;
3152 t_alg->algt.alg.hash.digest = ahash_digest;
3153 if (!strncmp(alg->cra_name, "hmac", 4))
3154 t_alg->algt.alg.hash.setkey = ahash_setkey;
3155 t_alg->algt.alg.hash.import = ahash_import;
3156 t_alg->algt.alg.hash.export = ahash_export;
3157
3158 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3159 !strncmp(alg->cra_name, "hmac", 4)) {
3160 devm_kfree(dev, t_alg);
3161 return ERR_PTR(-ENOTSUPP);
3162 }
3163 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3164 (!strcmp(alg->cra_name, "sha224") ||
3165 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3166 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3167 t_alg->algt.desc_hdr_template =
3168 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3169 DESC_HDR_SEL0_MDEUA |
3170 DESC_HDR_MODE0_MDEU_SHA256;
3171 }
3172 break;
3173 default:
3174 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3175 devm_kfree(dev, t_alg);
3176 return ERR_PTR(-EINVAL);
3177 }
3178
3179 alg->cra_module = THIS_MODULE;
3180 if (t_alg->algt.priority)
3181 alg->cra_priority = t_alg->algt.priority;
3182 else
3183 alg->cra_priority = TALITOS_CRA_PRIORITY;
3184 alg->cra_alignmask = 0;
3185 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3186 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3187
3188 t_alg->dev = dev;
3189
3190 return t_alg;
3191 }
3192
3193 static int talitos_probe_irq(struct platform_device *ofdev)
3194 {
3195 struct device *dev = &ofdev->dev;
3196 struct device_node *np = ofdev->dev.of_node;
3197 struct talitos_private *priv = dev_get_drvdata(dev);
3198 int err;
3199 bool is_sec1 = has_ftr_sec1(priv);
3200
3201 priv->irq[0] = irq_of_parse_and_map(np, 0);
3202 if (!priv->irq[0]) {
3203 dev_err(dev, "failed to map irq\n");
3204 return -EINVAL;
3205 }
3206 if (is_sec1) {
3207 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3208 dev_driver_string(dev), dev);
3209 goto primary_out;
3210 }
3211
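/*
* SEC2+ may expose two interrupt lines; when the second one exists,
* irq[0] serves channels 0 and 2 while irq[1] serves channels 1 and
* 3, otherwise one handler covers all four channels.
*/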
3212 priv->irq[1] = irq_of_parse_and_map(np, 1);
3213
3214 /* get the primary irq line */
3215 if (!priv->irq[1]) {
3216 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3217 dev_driver_string(dev), dev);
3218 goto primary_out;
3219 }
3220
3221 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3222 dev_driver_string(dev), dev);
3223 if (err)
3224 goto primary_out;
3225
3226 /* get the secondary irq line */
3227 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3228 dev_driver_string(dev), dev);
3229 if (err) {
3230 dev_err(dev, "failed to request secondary irq\n");
3231 irq_dispose_mapping(priv->irq[1]);
3232 priv->irq[1] = 0;
3233 }
3234
3235 return err;
3236
3237 primary_out:
3238 if (err) {
3239 dev_err(dev, "failed to request primary irq\n");
3240 irq_dispose_mapping(priv->irq[0]);
3241 priv->irq[0] = 0;
3242 }
3243
3244 return err;
3245 }
3246
3247 static int talitos_probe(struct platform_device *ofdev)
3248 {
3249 struct device *dev = &ofdev->dev;
3250 struct device_node *np = ofdev->dev.of_node;
3251 struct talitos_private *priv;
3252 int i, err;
3253 int stride;
3254 struct resource *res;
3255
3256 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3257 if (!priv)
3258 return -ENOMEM;
3259
3260 INIT_LIST_HEAD(&priv->alg_list);
3261
3262 dev_set_drvdata(dev, priv);
3263
3264 priv->ofdev = ofdev;
3265
3266 spin_lock_init(&priv->reg_lock);
3267
3268 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3269 if (!res)
3270 return -ENXIO;
3271 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3272 if (!priv->reg) {
dev_err(dev, "failed to ioremap\n");
3274 err = -ENOMEM;
3275 goto err_out;
3276 }
3277
3278 /* get SEC version capabilities from device tree */
3279 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3280 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3281 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3282 of_property_read_u32(np, "fsl,descriptor-types-mask",
3283 &priv->desc_types);
3284
3285 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3286 !priv->exec_units || !priv->desc_types) {
3287 dev_err(dev, "invalid property data in device tree node\n");
3288 err = -EINVAL;
3289 goto err_out;
3290 }
3291
3292 if (of_device_is_compatible(np, "fsl,sec3.0"))
3293 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3294
3295 if (of_device_is_compatible(np, "fsl,sec2.1"))
3296 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3297 TALITOS_FTR_SHA224_HWINIT |
3298 TALITOS_FTR_HMAC_OK;
3299
3300 if (of_device_is_compatible(np, "fsl,sec1.0"))
3301 priv->features |= TALITOS_FTR_SEC1;
3302
3303 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3304 priv->reg_deu = priv->reg + TALITOS12_DEU;
3305 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3306 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3307 stride = TALITOS1_CH_STRIDE;
3308 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3309 priv->reg_deu = priv->reg + TALITOS10_DEU;
3310 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3311 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3312 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3313 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3314 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3315 stride = TALITOS1_CH_STRIDE;
3316 } else {
3317 priv->reg_deu = priv->reg + TALITOS2_DEU;
3318 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3319 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3320 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3321 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3322 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3323 priv->reg_keu = priv->reg + TALITOS2_KEU;
3324 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3325 stride = TALITOS2_CH_STRIDE;
3326 }
3327
3328 err = talitos_probe_irq(ofdev);
3329 if (err)
3330 goto err_out;
3331
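/*
* Done processing is deferred to tasklets; the choice of handler
* mirrors the IRQ wiring set up in talitos_probe_irq() (single
* channel, all four channels, or the 0/2 and 1/3 split across two
* lines).
*/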
3332 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3333 if (priv->num_channels == 1)
3334 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3335 (unsigned long)dev);
3336 else
3337 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3338 (unsigned long)dev);
3339 } else {
3340 if (priv->irq[1]) {
3341 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3342 (unsigned long)dev);
3343 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3344 (unsigned long)dev);
3345 } else if (priv->num_channels == 1) {
3346 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3347 (unsigned long)dev);
3348 } else {
3349 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3350 (unsigned long)dev);
3351 }
3352 }
3353
3354 priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
3355 priv->num_channels, GFP_KERNEL);
3356 if (!priv->chan) {
3357 dev_err(dev, "failed to allocate channel management space\n");
3358 err = -ENOMEM;
3359 goto err_out;
3360 }
3361
3362 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3363
3364 for (i = 0; i < priv->num_channels; i++) {
3365 priv->chan[i].reg = priv->reg + stride * (i + 1);
3366 if (!priv->irq[1] || !(i & 1))
3367 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3368
3369 spin_lock_init(&priv->chan[i].head_lock);
3370 spin_lock_init(&priv->chan[i].tail_lock);
3371
3372 priv->chan[i].fifo = devm_kzalloc(dev,
3373 sizeof(struct talitos_request) *
3374 priv->fifo_len, GFP_KERNEL);
3375 if (!priv->chan[i].fifo) {
3376 dev_err(dev, "failed to allocate request fifo %d\n", i);
3377 err = -ENOMEM;
3378 goto err_out;
3379 }
3380
3381 atomic_set(&priv->chan[i].submit_count,
3382 -(priv->chfifo_len - 1));
3383 }
3384
3385 dma_set_mask(dev, DMA_BIT_MASK(36));
3386
3387 /* reset and initialize the h/w */
3388 err = init_device(dev);
3389 if (err) {
3390 dev_err(dev, "failed to initialize device\n");
3391 goto err_out;
3392 }
3393
3394 /* register the RNG, if available */
3395 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3396 err = talitos_register_rng(dev);
3397 if (err) {
3398 dev_err(dev, "failed to register hwrng: %d\n", err);
3399 goto err_out;
3400 } else
3401 dev_info(dev, "hwrng\n");
3402 }
3403
3404 /* register crypto algorithms the device supports */
3405 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3406 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3407 struct talitos_crypto_alg *t_alg;
3408 struct crypto_alg *alg = NULL;
3409
3410 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3411 if (IS_ERR(t_alg)) {
3412 err = PTR_ERR(t_alg);
3413 if (err == -ENOTSUPP)
3414 continue;
3415 goto err_out;
3416 }
3417
3418 switch (t_alg->algt.type) {
3419 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3420 err = crypto_register_alg(
3421 &t_alg->algt.alg.crypto);
3422 alg = &t_alg->algt.alg.crypto;
3423 break;
3424
3425 case CRYPTO_ALG_TYPE_AEAD:
3426 err = crypto_register_aead(
3427 &t_alg->algt.alg.aead);
3428 alg = &t_alg->algt.alg.aead.base;
3429 break;
3430
3431 case CRYPTO_ALG_TYPE_AHASH:
3432 err = crypto_register_ahash(
3433 &t_alg->algt.alg.hash);
3434 alg = &t_alg->algt.alg.hash.halg.base;
3435 break;
3436 }
3437 if (err) {
3438 dev_err(dev, "%s alg registration failed\n",
3439 alg->cra_driver_name);
3440 devm_kfree(dev, t_alg);
3441 } else
3442 list_add_tail(&t_alg->entry, &priv->alg_list);
3443 }
3444 }
3445 if (!list_empty(&priv->alg_list))
3446 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3447 (char *)of_get_property(np, "compatible", NULL));
3448
3449 return 0;
3450
3451 err_out:
3452 talitos_remove(ofdev);
3453
3454 return err;
3455 }
3456
3457 static const struct of_device_id talitos_match[] = {
3458 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3459 {
3460 .compatible = "fsl,sec1.0",
3461 },
3462 #endif
3463 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3464 {
3465 .compatible = "fsl,sec2.0",
3466 },
3467 #endif
3468 {},
3469 };
3470 MODULE_DEVICE_TABLE(of, talitos_match);
3471
3472 static struct platform_driver talitos_driver = {
3473 .driver = {
3474 .name = "talitos",
3475 .of_match_table = talitos_match,
3476 },
3477 .probe = talitos_probe,
3478 .remove = talitos_remove,
3479 };
3480
3481 module_platform_driver(talitos_driver);
3482
3483 MODULE_LICENSE("GPL");
3484 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3485 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");