]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/crypto/talitos.c
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-bionic-kernel.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

922f9dc8
LC
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
922f9dc8
LC
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
64}
65
340ff60a
HG
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
42e8b0d7 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
922f9dc8 75 bool is_sec1)
538caf83 76{
922f9dc8
LC
77 if (is_sec1) {
78 ptr->res = 0;
79 ptr->len1 = cpu_to_be16(len);
80 } else {
81 ptr->len = cpu_to_be16(len);
82 }
538caf83
LC
83}
84
922f9dc8
LC
85static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 bool is_sec1)
538caf83 87{
922f9dc8
LC
88 if (is_sec1)
89 return be16_to_cpu(ptr->len1);
90 else
91 return be16_to_cpu(ptr->len);
538caf83
LC
92}
93
922f9dc8 94static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
185eb79f 95{
922f9dc8
LC
96 if (!is_sec1)
97 ptr->j_extent = 0;
185eb79f
LC
98}
99
9c4a7965
KP
100/*
101 * map virtual single (contiguous) pointer to h/w descriptor pointer
102 */
103static void map_single_talitos_ptr(struct device *dev,
edc6bd69 104 struct talitos_ptr *ptr,
42e8b0d7 105 unsigned int len, void *data,
9c4a7965
KP
106 enum dma_data_direction dir)
107{
81eb024c 108 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
922f9dc8
LC
109 struct talitos_private *priv = dev_get_drvdata(dev);
110 bool is_sec1 = has_ftr_sec1(priv);
81eb024c 111
922f9dc8
LC
112 to_talitos_ptr_len(ptr, len, is_sec1);
113 to_talitos_ptr(ptr, dma_addr, is_sec1);
114 to_talitos_ptr_extent_clear(ptr, is_sec1);
9c4a7965
KP
115}
116
117/*
118 * unmap bus single (contiguous) h/w descriptor pointer
119 */
120static void unmap_single_talitos_ptr(struct device *dev,
edc6bd69 121 struct talitos_ptr *ptr,
9c4a7965
KP
122 enum dma_data_direction dir)
123{
922f9dc8
LC
124 struct talitos_private *priv = dev_get_drvdata(dev);
125 bool is_sec1 = has_ftr_sec1(priv);
126
edc6bd69 127 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
922f9dc8 128 from_talitos_ptr_len(ptr, is_sec1), dir);
9c4a7965
KP
129}
130
131static int reset_channel(struct device *dev, int ch)
132{
133 struct talitos_private *priv = dev_get_drvdata(dev);
134 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 135 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 136
dd3c0987
LC
137 if (is_sec1) {
138 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
139 TALITOS1_CCCR_LO_RESET);
9c4a7965 140
dd3c0987
LC
141 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
142 TALITOS1_CCCR_LO_RESET) && --timeout)
143 cpu_relax();
144 } else {
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
146 TALITOS2_CCCR_RESET);
147
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
149 TALITOS2_CCCR_RESET) && --timeout)
150 cpu_relax();
151 }
9c4a7965
KP
152
153 if (timeout == 0) {
154 dev_err(dev, "failed to reset channel %d\n", ch);
155 return -EIO;
156 }
157
81eb024c 158 /* set 36-bit addressing, done writeback enable and done IRQ enable */
ad42d5fc 159 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
81eb024c 160 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
9c4a7965 161
fe5720e2
KP
162 /* and ICCR writeback, if available */
163 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
ad42d5fc 164 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
fe5720e2
KP
165 TALITOS_CCCR_LO_IWSE);
166
9c4a7965
KP
167 return 0;
168}
169
170static int reset_device(struct device *dev)
171{
172 struct talitos_private *priv = dev_get_drvdata(dev);
173 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987
LC
174 bool is_sec1 = has_ftr_sec1(priv);
175 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
9c4a7965 176
c3e337f8 177 setbits32(priv->reg + TALITOS_MCR, mcr);
9c4a7965 178
dd3c0987 179 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
9c4a7965
KP
180 && --timeout)
181 cpu_relax();
182
2cdba3cf 183 if (priv->irq[1]) {
c3e337f8
KP
184 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
185 setbits32(priv->reg + TALITOS_MCR, mcr);
186 }
187
9c4a7965
KP
188 if (timeout == 0) {
189 dev_err(dev, "failed to reset device\n");
190 return -EIO;
191 }
192
193 return 0;
194}
195
196/*
197 * Reset and initialize the device
198 */
199static int init_device(struct device *dev)
200{
201 struct talitos_private *priv = dev_get_drvdata(dev);
202 int ch, err;
dd3c0987 203 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965
KP
204
205 /*
206 * Master reset
207 * errata documentation: warning: certain SEC interrupts
208 * are not fully cleared by writing the MCR:SWR bit,
209 * set bit twice to completely reset
210 */
211 err = reset_device(dev);
212 if (err)
213 return err;
214
215 err = reset_device(dev);
216 if (err)
217 return err;
218
219 /* reset channels */
220 for (ch = 0; ch < priv->num_channels; ch++) {
221 err = reset_channel(dev, ch);
222 if (err)
223 return err;
224 }
225
226 /* enable channel done and error interrupts */
dd3c0987
LC
227 if (is_sec1) {
228 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
229 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
230 /* disable parity error check in DEU (erroneous? test vect.) */
231 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
232 } else {
233 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
234 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
235 }
9c4a7965 236
fe5720e2
KP
237 /* disable integrity check error interrupts (use writeback instead) */
238 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
5fa7fa14 239 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
fe5720e2
KP
240 TALITOS_MDEUICR_LO_ICE);
241
9c4a7965
KP
242 return 0;
243}
244
245/**
246 * talitos_submit - submits a descriptor to the device for processing
247 * @dev: the SEC device to be used
5228f0f7 248 * @ch: the SEC device channel to be used
9c4a7965
KP
249 * @desc: the descriptor to be processed by the device
250 * @callback: whom to call when processing is complete
251 * @context: a handle for use by caller (optional)
252 *
253 * desc must contain valid dma-mapped (bus physical) address pointers.
254 * callback must check err and feedback in descriptor header
255 * for device processing status.
256 */
865d5061
HG
257int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
258 void (*callback)(struct device *dev,
259 struct talitos_desc *desc,
260 void *context, int error),
261 void *context)
9c4a7965
KP
262{
263 struct talitos_private *priv = dev_get_drvdata(dev);
264 struct talitos_request *request;
5228f0f7 265 unsigned long flags;
9c4a7965 266 int head;
7d607c6a 267 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 268
4b992628 269 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
9c4a7965 270
4b992628 271 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
ec6644d6 272 /* h/w fifo is full */
4b992628 273 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
9c4a7965
KP
274 return -EAGAIN;
275 }
276
4b992628
KP
277 head = priv->chan[ch].head;
278 request = &priv->chan[ch].fifo[head];
ec6644d6 279
9c4a7965 280 /* map descriptor and save caller data */
7d607c6a
LC
281 if (is_sec1) {
282 desc->hdr1 = desc->hdr;
283 desc->next_desc = 0;
284 request->dma_desc = dma_map_single(dev, &desc->hdr1,
285 TALITOS_DESC_SIZE,
286 DMA_BIDIRECTIONAL);
287 } else {
288 request->dma_desc = dma_map_single(dev, desc,
289 TALITOS_DESC_SIZE,
290 DMA_BIDIRECTIONAL);
291 }
9c4a7965
KP
292 request->callback = callback;
293 request->context = context;
294
295 /* increment fifo head */
4b992628 296 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
9c4a7965
KP
297
298 smp_wmb();
299 request->desc = desc;
300
301 /* GO! */
302 wmb();
ad42d5fc
KP
303 out_be32(priv->chan[ch].reg + TALITOS_FF,
304 upper_32_bits(request->dma_desc));
305 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
a752447a 306 lower_32_bits(request->dma_desc));
9c4a7965 307
4b992628 308 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
9c4a7965
KP
309
310 return -EINPROGRESS;
311}
865d5061 312EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
313
314/*
315 * process what was done, notify callback of error if not
316 */
317static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
318{
319 struct talitos_private *priv = dev_get_drvdata(dev);
320 struct talitos_request *request, saved_req;
321 unsigned long flags;
322 int tail, status;
7d607c6a 323 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 324
4b992628 325 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
9c4a7965 326
4b992628
KP
327 tail = priv->chan[ch].tail;
328 while (priv->chan[ch].fifo[tail].desc) {
7d607c6a
LC
329 __be32 hdr;
330
4b992628 331 request = &priv->chan[ch].fifo[tail];
9c4a7965
KP
332
333 /* descriptors with their done bits set don't get the error */
334 rmb();
7d607c6a
LC
335 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
336
337 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
9c4a7965 338 status = 0;
ca38a814 339 else
9c4a7965
KP
340 if (!error)
341 break;
342 else
343 status = error;
344
345 dma_unmap_single(dev, request->dma_desc,
7d607c6a 346 TALITOS_DESC_SIZE,
e938e465 347 DMA_BIDIRECTIONAL);
9c4a7965
KP
348
349 /* copy entries so we can call callback outside lock */
350 saved_req.desc = request->desc;
351 saved_req.callback = request->callback;
352 saved_req.context = request->context;
353
354 /* release request entry in fifo */
355 smp_wmb();
356 request->desc = NULL;
357
358 /* increment fifo tail */
4b992628 359 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
9c4a7965 360
4b992628 361 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
ec6644d6 362
4b992628 363 atomic_dec(&priv->chan[ch].submit_count);
ec6644d6 364
9c4a7965
KP
365 saved_req.callback(dev, saved_req.desc, saved_req.context,
366 status);
367 /* channel may resume processing in single desc error case */
368 if (error && !reset_ch && status == error)
369 return;
4b992628
KP
370 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
371 tail = priv->chan[ch].tail;
9c4a7965
KP
372 }
373
4b992628 374 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
9c4a7965
KP
375}
376
377/*
378 * process completed requests for channels that have done status
379 */
dd3c0987
LC
380#define DEF_TALITOS1_DONE(name, ch_done_mask) \
381static void talitos1_done_##name(unsigned long data) \
382{ \
383 struct device *dev = (struct device *)data; \
384 struct talitos_private *priv = dev_get_drvdata(dev); \
385 unsigned long flags; \
386 \
387 if (ch_done_mask & 0x10000000) \
388 flush_channel(dev, 0, 0, 0); \
389 if (priv->num_channels == 1) \
390 goto out; \
391 if (ch_done_mask & 0x40000000) \
392 flush_channel(dev, 1, 0, 0); \
393 if (ch_done_mask & 0x00010000) \
394 flush_channel(dev, 2, 0, 0); \
395 if (ch_done_mask & 0x00040000) \
396 flush_channel(dev, 3, 0, 0); \
397 \
398out: \
399 /* At this point, all completed channels have been processed */ \
400 /* Unmask done interrupts for channels completed later on. */ \
401 spin_lock_irqsave(&priv->reg_lock, flags); \
402 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
403 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
404 spin_unlock_irqrestore(&priv->reg_lock, flags); \
405}
406
407DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
408
409#define DEF_TALITOS2_DONE(name, ch_done_mask) \
410static void talitos2_done_##name(unsigned long data) \
c3e337f8
KP
411{ \
412 struct device *dev = (struct device *)data; \
413 struct talitos_private *priv = dev_get_drvdata(dev); \
511d63cb 414 unsigned long flags; \
c3e337f8
KP
415 \
416 if (ch_done_mask & 1) \
417 flush_channel(dev, 0, 0, 0); \
418 if (priv->num_channels == 1) \
419 goto out; \
420 if (ch_done_mask & (1 << 2)) \
421 flush_channel(dev, 1, 0, 0); \
422 if (ch_done_mask & (1 << 4)) \
423 flush_channel(dev, 2, 0, 0); \
424 if (ch_done_mask & (1 << 6)) \
425 flush_channel(dev, 3, 0, 0); \
426 \
427out: \
428 /* At this point, all completed channels have been processed */ \
429 /* Unmask done interrupts for channels completed later on. */ \
511d63cb 430 spin_lock_irqsave(&priv->reg_lock, flags); \
c3e337f8 431 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
dd3c0987 432 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
511d63cb 433 spin_unlock_irqrestore(&priv->reg_lock, flags); \
9c4a7965 434}
dd3c0987
LC
435
436DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
437DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
438DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
9c4a7965
KP
439
440/*
441 * locate current (offending) descriptor
442 */
3e721aeb 443static u32 current_desc_hdr(struct device *dev, int ch)
9c4a7965
KP
444{
445 struct talitos_private *priv = dev_get_drvdata(dev);
b62ffd8c 446 int tail, iter;
9c4a7965
KP
447 dma_addr_t cur_desc;
448
b62ffd8c
HG
449 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
450 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
9c4a7965 451
b62ffd8c
HG
452 if (!cur_desc) {
453 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
454 return 0;
455 }
456
457 tail = priv->chan[ch].tail;
458
459 iter = tail;
460 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
461 iter = (iter + 1) & (priv->fifo_len - 1);
462 if (iter == tail) {
9c4a7965 463 dev_err(dev, "couldn't locate current descriptor\n");
3e721aeb 464 return 0;
9c4a7965
KP
465 }
466 }
467
b62ffd8c 468 return priv->chan[ch].fifo[iter].desc->hdr;
9c4a7965
KP
469}
470
471/*
472 * user diagnostics; report root cause of error based on execution unit status
473 */
3e721aeb 474static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
9c4a7965
KP
475{
476 struct talitos_private *priv = dev_get_drvdata(dev);
477 int i;
478
3e721aeb 479 if (!desc_hdr)
ad42d5fc 480 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
3e721aeb
KP
481
482 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
9c4a7965
KP
483 case DESC_HDR_SEL0_AFEU:
484 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
5fa7fa14
LC
485 in_be32(priv->reg_afeu + TALITOS_EUISR),
486 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
9c4a7965
KP
487 break;
488 case DESC_HDR_SEL0_DEU:
489 dev_err(dev, "DEUISR 0x%08x_%08x\n",
5fa7fa14
LC
490 in_be32(priv->reg_deu + TALITOS_EUISR),
491 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
9c4a7965
KP
492 break;
493 case DESC_HDR_SEL0_MDEUA:
494 case DESC_HDR_SEL0_MDEUB:
495 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
5fa7fa14
LC
496 in_be32(priv->reg_mdeu + TALITOS_EUISR),
497 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
9c4a7965
KP
498 break;
499 case DESC_HDR_SEL0_RNG:
500 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
5fa7fa14
LC
501 in_be32(priv->reg_rngu + TALITOS_ISR),
502 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
9c4a7965
KP
503 break;
504 case DESC_HDR_SEL0_PKEU:
505 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
5fa7fa14
LC
506 in_be32(priv->reg_pkeu + TALITOS_EUISR),
507 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
9c4a7965
KP
508 break;
509 case DESC_HDR_SEL0_AESU:
510 dev_err(dev, "AESUISR 0x%08x_%08x\n",
5fa7fa14
LC
511 in_be32(priv->reg_aesu + TALITOS_EUISR),
512 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
9c4a7965
KP
513 break;
514 case DESC_HDR_SEL0_CRCU:
515 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
5fa7fa14
LC
516 in_be32(priv->reg_crcu + TALITOS_EUISR),
517 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
9c4a7965
KP
518 break;
519 case DESC_HDR_SEL0_KEU:
520 dev_err(dev, "KEUISR 0x%08x_%08x\n",
5fa7fa14
LC
521 in_be32(priv->reg_pkeu + TALITOS_EUISR),
522 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
9c4a7965
KP
523 break;
524 }
525
3e721aeb 526 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
9c4a7965
KP
527 case DESC_HDR_SEL1_MDEUA:
528 case DESC_HDR_SEL1_MDEUB:
529 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
5fa7fa14
LC
530 in_be32(priv->reg_mdeu + TALITOS_EUISR),
531 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
9c4a7965
KP
532 break;
533 case DESC_HDR_SEL1_CRCU:
534 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
5fa7fa14
LC
535 in_be32(priv->reg_crcu + TALITOS_EUISR),
536 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
9c4a7965
KP
537 break;
538 }
539
540 for (i = 0; i < 8; i++)
541 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
ad42d5fc
KP
542 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
543 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
9c4a7965
KP
544}
545
546/*
547 * recover from error interrupts
548 */
5e718a09 549static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
9c4a7965 550{
9c4a7965
KP
551 struct talitos_private *priv = dev_get_drvdata(dev);
552 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 553 int ch, error, reset_dev = 0;
42e8b0d7 554 u32 v_lo;
dd3c0987
LC
555 bool is_sec1 = has_ftr_sec1(priv);
556 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
9c4a7965
KP
557
558 for (ch = 0; ch < priv->num_channels; ch++) {
559 /* skip channels without errors */
dd3c0987
LC
560 if (is_sec1) {
561 /* bits 29, 31, 17, 19 */
562 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
563 continue;
564 } else {
565 if (!(isr & (1 << (ch * 2 + 1))))
566 continue;
567 }
9c4a7965
KP
568
569 error = -EINVAL;
570
ad42d5fc 571 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
9c4a7965
KP
572
573 if (v_lo & TALITOS_CCPSR_LO_DOF) {
574 dev_err(dev, "double fetch fifo overflow error\n");
575 error = -EAGAIN;
576 reset_ch = 1;
577 }
578 if (v_lo & TALITOS_CCPSR_LO_SOF) {
579 /* h/w dropped descriptor */
580 dev_err(dev, "single fetch fifo overflow error\n");
581 error = -EAGAIN;
582 }
583 if (v_lo & TALITOS_CCPSR_LO_MDTE)
584 dev_err(dev, "master data transfer error\n");
585 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
dd3c0987
LC
586 dev_err(dev, is_sec1 ? "pointeur not complete error\n"
587 : "s/g data length zero error\n");
9c4a7965 588 if (v_lo & TALITOS_CCPSR_LO_FPZ)
dd3c0987
LC
589 dev_err(dev, is_sec1 ? "parity error\n"
590 : "fetch pointer zero error\n");
9c4a7965
KP
591 if (v_lo & TALITOS_CCPSR_LO_IDH)
592 dev_err(dev, "illegal descriptor header error\n");
593 if (v_lo & TALITOS_CCPSR_LO_IEU)
dd3c0987
LC
594 dev_err(dev, is_sec1 ? "static assignment error\n"
595 : "invalid exec unit error\n");
9c4a7965 596 if (v_lo & TALITOS_CCPSR_LO_EU)
3e721aeb 597 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
dd3c0987
LC
598 if (!is_sec1) {
599 if (v_lo & TALITOS_CCPSR_LO_GB)
600 dev_err(dev, "gather boundary error\n");
601 if (v_lo & TALITOS_CCPSR_LO_GRL)
602 dev_err(dev, "gather return/length error\n");
603 if (v_lo & TALITOS_CCPSR_LO_SB)
604 dev_err(dev, "scatter boundary error\n");
605 if (v_lo & TALITOS_CCPSR_LO_SRL)
606 dev_err(dev, "scatter return/length error\n");
607 }
9c4a7965
KP
608
609 flush_channel(dev, ch, error, reset_ch);
610
611 if (reset_ch) {
612 reset_channel(dev, ch);
613 } else {
ad42d5fc 614 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
dd3c0987 615 TALITOS2_CCCR_CONT);
ad42d5fc
KP
616 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
617 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
dd3c0987 618 TALITOS2_CCCR_CONT) && --timeout)
9c4a7965
KP
619 cpu_relax();
620 if (timeout == 0) {
621 dev_err(dev, "failed to restart channel %d\n",
622 ch);
623 reset_dev = 1;
624 }
625 }
626 }
dd3c0987
LC
627 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
628 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
629 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
630 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
631 isr, isr_lo);
632 else
633 dev_err(dev, "done overflow, internal time out, or "
634 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
9c4a7965
KP
635
636 /* purge request queues */
637 for (ch = 0; ch < priv->num_channels; ch++)
638 flush_channel(dev, ch, -EIO, 1);
639
640 /* reset and reinitialize the device */
641 init_device(dev);
642 }
643}
644
dd3c0987
LC
645#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
646static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
647{ \
648 struct device *dev = data; \
649 struct talitos_private *priv = dev_get_drvdata(dev); \
650 u32 isr, isr_lo; \
651 unsigned long flags; \
652 \
653 spin_lock_irqsave(&priv->reg_lock, flags); \
654 isr = in_be32(priv->reg + TALITOS_ISR); \
655 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
656 /* Acknowledge interrupt */ \
657 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
658 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
659 \
660 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
661 spin_unlock_irqrestore(&priv->reg_lock, flags); \
662 talitos_error(dev, isr & ch_err_mask, isr_lo); \
663 } \
664 else { \
665 if (likely(isr & ch_done_mask)) { \
666 /* mask further done interrupts. */ \
667 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
668 /* done_task will unmask done interrupts at exit */ \
669 tasklet_schedule(&priv->done_task[tlet]); \
670 } \
671 spin_unlock_irqrestore(&priv->reg_lock, flags); \
672 } \
673 \
674 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
675 IRQ_NONE; \
676}
677
678DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
679
680#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
681static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
c3e337f8
KP
682{ \
683 struct device *dev = data; \
684 struct talitos_private *priv = dev_get_drvdata(dev); \
685 u32 isr, isr_lo; \
511d63cb 686 unsigned long flags; \
c3e337f8 687 \
511d63cb 688 spin_lock_irqsave(&priv->reg_lock, flags); \
c3e337f8
KP
689 isr = in_be32(priv->reg + TALITOS_ISR); \
690 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
691 /* Acknowledge interrupt */ \
692 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
693 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
694 \
511d63cb
HG
695 if (unlikely(isr & ch_err_mask || isr_lo)) { \
696 spin_unlock_irqrestore(&priv->reg_lock, flags); \
697 talitos_error(dev, isr & ch_err_mask, isr_lo); \
698 } \
699 else { \
c3e337f8
KP
700 if (likely(isr & ch_done_mask)) { \
701 /* mask further done interrupts. */ \
702 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
703 /* done_task will unmask done interrupts at exit */ \
704 tasklet_schedule(&priv->done_task[tlet]); \
705 } \
511d63cb
HG
706 spin_unlock_irqrestore(&priv->reg_lock, flags); \
707 } \
c3e337f8
KP
708 \
709 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
710 IRQ_NONE; \
9c4a7965 711}
dd3c0987
LC
712
713DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
714DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
715 0)
716DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
717 1)
9c4a7965
KP
718
719/*
720 * hwrng
721 */
722static int talitos_rng_data_present(struct hwrng *rng, int wait)
723{
724 struct device *dev = (struct device *)rng->priv;
725 struct talitos_private *priv = dev_get_drvdata(dev);
726 u32 ofl;
727 int i;
728
729 for (i = 0; i < 20; i++) {
5fa7fa14 730 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
9c4a7965
KP
731 TALITOS_RNGUSR_LO_OFL;
732 if (ofl || !wait)
733 break;
734 udelay(10);
735 }
736
737 return !!ofl;
738}
739
740static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
741{
742 struct device *dev = (struct device *)rng->priv;
743 struct talitos_private *priv = dev_get_drvdata(dev);
744
745 /* rng fifo requires 64-bit accesses */
5fa7fa14
LC
746 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
747 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
9c4a7965
KP
748
749 return sizeof(u32);
750}
751
752static int talitos_rng_init(struct hwrng *rng)
753{
754 struct device *dev = (struct device *)rng->priv;
755 struct talitos_private *priv = dev_get_drvdata(dev);
756 unsigned int timeout = TALITOS_TIMEOUT;
757
5fa7fa14
LC
758 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
759 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
760 & TALITOS_RNGUSR_LO_RD)
9c4a7965
KP
761 && --timeout)
762 cpu_relax();
763 if (timeout == 0) {
764 dev_err(dev, "failed to reset rng hw\n");
765 return -ENODEV;
766 }
767
768 /* start generating */
5fa7fa14 769 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
9c4a7965
KP
770
771 return 0;
772}
773
774static int talitos_register_rng(struct device *dev)
775{
776 struct talitos_private *priv = dev_get_drvdata(dev);
35a3bb3d 777 int err;
9c4a7965
KP
778
779 priv->rng.name = dev_driver_string(dev),
780 priv->rng.init = talitos_rng_init,
781 priv->rng.data_present = talitos_rng_data_present,
782 priv->rng.data_read = talitos_rng_data_read,
783 priv->rng.priv = (unsigned long)dev;
784
35a3bb3d
AS
785 err = hwrng_register(&priv->rng);
786 if (!err)
787 priv->rng_registered = true;
788
789 return err;
9c4a7965
KP
790}
791
792static void talitos_unregister_rng(struct device *dev)
793{
794 struct talitos_private *priv = dev_get_drvdata(dev);
795
35a3bb3d
AS
796 if (!priv->rng_registered)
797 return;
798
9c4a7965 799 hwrng_unregister(&priv->rng);
35a3bb3d 800 priv->rng_registered = false;
9c4a7965
KP
801}
802
803/*
804 * crypto alg
805 */
806#define TALITOS_CRA_PRIORITY 3000
357fb605 807#define TALITOS_MAX_KEY_SIZE 96
3952f17e 808#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
70bcaca7 809
9c4a7965
KP
810struct talitos_ctx {
811 struct device *dev;
5228f0f7 812 int ch;
9c4a7965
KP
813 __be32 desc_hdr_template;
814 u8 key[TALITOS_MAX_KEY_SIZE];
70bcaca7 815 u8 iv[TALITOS_MAX_IV_LENGTH];
9c4a7965
KP
816 unsigned int keylen;
817 unsigned int enckeylen;
818 unsigned int authkeylen;
9c4a7965
KP
819};
820
497f2e6b
LN
821#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
822#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
823
824struct talitos_ahash_req_ctx {
60f208d7 825 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
497f2e6b
LN
826 unsigned int hw_context_size;
827 u8 buf[HASH_MAX_BLOCK_SIZE];
828 u8 bufnext[HASH_MAX_BLOCK_SIZE];
60f208d7 829 unsigned int swinit;
497f2e6b
LN
830 unsigned int first;
831 unsigned int last;
832 unsigned int to_hash_later;
42e8b0d7 833 unsigned int nbuf;
497f2e6b
LN
834 struct scatterlist bufsl[2];
835 struct scatterlist *psrc;
836};
837
3639ca84
HG
838struct talitos_export_state {
839 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
840 u8 buf[HASH_MAX_BLOCK_SIZE];
841 unsigned int swinit;
842 unsigned int first;
843 unsigned int last;
844 unsigned int to_hash_later;
845 unsigned int nbuf;
846};
847
56af8cd4
LN
848static int aead_setkey(struct crypto_aead *authenc,
849 const u8 *key, unsigned int keylen)
9c4a7965
KP
850{
851 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 852 struct crypto_authenc_keys keys;
9c4a7965 853
c306a98d 854 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
9c4a7965
KP
855 goto badkey;
856
c306a98d 857 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
9c4a7965
KP
858 goto badkey;
859
c306a98d
MK
860 memcpy(ctx->key, keys.authkey, keys.authkeylen);
861 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 862
c306a98d
MK
863 ctx->keylen = keys.authkeylen + keys.enckeylen;
864 ctx->enckeylen = keys.enckeylen;
865 ctx->authkeylen = keys.authkeylen;
9c4a7965
KP
866
867 return 0;
868
869badkey:
870 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
871 return -EINVAL;
872}
873
874/*
56af8cd4 875 * talitos_edesc - s/w-extended descriptor
9c4a7965
KP
876 * @src_nents: number of segments in input scatterlist
877 * @dst_nents: number of segments in output scatterlist
aeb4c132 878 * @icv_ool: whether ICV is out-of-line
79fd31d3 879 * @iv_dma: dma address of iv for checking continuity and link table
9c4a7965 880 * @dma_len: length of dma mapped link_tbl space
6f65f6ac 881 * @dma_link_tbl: bus physical address of link_tbl/buf
9c4a7965 882 * @desc: h/w descriptor
6f65f6ac
LC
883 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
884 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
9c4a7965
KP
885 *
886 * if decrypting (with authcheck), or either one of src_nents or dst_nents
887 * is greater than 1, an integrity check value is concatenated to the end
888 * of link_tbl data
889 */
56af8cd4 890struct talitos_edesc {
9c4a7965
KP
891 int src_nents;
892 int dst_nents;
aeb4c132 893 bool icv_ool;
79fd31d3 894 dma_addr_t iv_dma;
9c4a7965
KP
895 int dma_len;
896 dma_addr_t dma_link_tbl;
897 struct talitos_desc desc;
6f65f6ac
LC
898 union {
899 struct talitos_ptr link_tbl[0];
900 u8 buf[0];
901 };
9c4a7965
KP
902};
903
4de9d0b5
LN
904static void talitos_sg_unmap(struct device *dev,
905 struct talitos_edesc *edesc,
906 struct scatterlist *src,
907 struct scatterlist *dst)
908{
909 unsigned int src_nents = edesc->src_nents ? : 1;
910 unsigned int dst_nents = edesc->dst_nents ? : 1;
911
912 if (src != dst) {
b8a011d4 913 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
4de9d0b5 914
497f2e6b 915 if (dst) {
b8a011d4 916 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
497f2e6b 917 }
4de9d0b5 918 } else
b8a011d4 919 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
4de9d0b5
LN
920}
921
/* undo every DMA mapping set up by ipsec_esp() for an AEAD request */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	/* ptr[6]: iv out, ptr[3]: cipher key, ptr[2]: iv in, ptr[0]: hmac key */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	/* the link table area, if any, was mapped bidirectionally as one chunk */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
937
/*
 * ipsec_esp descriptor callbacks
 */
/* encrypt-done: copy an out-of-line ICV to the end of dst, then complete */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		/* ICV was written after the link tables (see ipsec_esp()) */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
969
/*
 * decrypt-done when the h/w could not check the ICV itself: compare the
 * ICV the h/w generated against the one stashed from the request and
 * report -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			/* generated ICV lives just past the link tables;
			 * if it was out-of-line the stashed one follows it */
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		/* constant-time compare to avoid leaking match position */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1005
fe5720e2 1006static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
e938e465
KP
1007 struct talitos_desc *desc,
1008 void *context, int err)
fe5720e2
KP
1009{
1010 struct aead_request *req = context;
19bbbc63
KP
1011 struct talitos_edesc *edesc;
1012
1013 edesc = container_of(desc, struct talitos_edesc, desc);
fe5720e2
KP
1014
1015 ipsec_esp_unmap(dev, edesc, req);
1016
1017 /* check ICV auth status */
e938e465
KP
1018 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1019 DESC_HDR_LO_ICCR1_PASS))
1020 err = -EBADMSG;
fe5720e2
KP
1021
1022 kfree(edesc);
1023
1024 aead_request_complete(req, err);
1025}
1026
9c4a7965
KP
1027/*
1028 * convert scatterlist to SEC h/w link table format
1029 * stop at cryptlen bytes
1030 */
aeb4c132
HX
1031static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1032 unsigned int offset, int cryptlen,
1033 struct talitos_ptr *link_tbl_ptr)
9c4a7965 1034{
70bcaca7 1035 int n_sg = sg_count;
aeb4c132 1036 int count = 0;
70bcaca7 1037
aeb4c132
HX
1038 while (cryptlen && sg && n_sg--) {
1039 unsigned int len = sg_dma_len(sg);
9c4a7965 1040
aeb4c132
HX
1041 if (offset >= len) {
1042 offset -= len;
1043 goto next;
1044 }
1045
1046 len -= offset;
1047
1048 if (len > cryptlen)
1049 len = cryptlen;
1050
1051 to_talitos_ptr(link_tbl_ptr + count,
1052 sg_dma_address(sg) + offset, 0);
1053 link_tbl_ptr[count].len = cpu_to_be16(len);
1054 link_tbl_ptr[count].j_extent = 0;
1055 count++;
1056 cryptlen -= len;
1057 offset = 0;
1058
1059next:
1060 sg = sg_next(sg);
70bcaca7 1061 }
9c4a7965
KP
1062
1063 /* tag end of link table */
aeb4c132
HX
1064 if (count > 0)
1065 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
70bcaca7 1066
aeb4c132
HX
1067 return count;
1068}
1069
/* as sg_to_link_tbl_offset(), starting at the beginning of the list */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
1077
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Builds all seven descriptor pointers for an AEAD operation (hmac key,
 * assoc data, iv, cipher key, cipher in, cipher out, iv out), creating
 * h/w link tables for any multi-segment scatterlists, then submits the
 * descriptor.  On synchronous failure, unmaps and frees the edesc here;
 * otherwise 'callback' runs on completion.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;	/* next free link-table entry */
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		/* assoc data spans several segments: point at a link table */
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	/* when the h/w checks the ICV itself it must also read the ICV
	 * appended to the ciphertext */
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		/* collapsed to a single entry: use that entry directly */
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
		    sg_to_link_tbl_offset(areq->dst, sg_count,
					  areq->assoclen, cryptlen,
					  &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;	/* clear old end-of-table tag */
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		/* encrypt_done must copy the ICV back to dst */
		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous failure: callback will not run, clean up here */
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1219
/*
 * allocate and map the extended descriptor
 *
 * Sizes the edesc for the request's scatterlists: nents fields are left 0
 * for single-segment lists.  On SEC2+ the trailing area holds link tables
 * plus two ICVs; on SEC1 it is a bounce buffer of cryptlen per direction.
 * The IV, if any, is DMA-mapped here and recorded in iv_dma.
 * Returns the edesc or an ERR_PTR; on error the IV mapping is undone.
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		/* in-place (or no dst): src carries everything incl. ICV */
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		/* the ICV sits on src when decrypting, on dst when encrypting */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* GFP_DMA: the h/w DMAs directly into this allocation */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}
1323
79fd31d3 1324static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1325 int icv_stashing, bool encrypt)
4de9d0b5
LN
1326{
1327 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 1328 unsigned int authsize = crypto_aead_authsize(authenc);
4de9d0b5 1329 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1330 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1331
aeb4c132 1332 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1333 iv, areq->assoclen, areq->cryptlen,
aeb4c132 1334 authsize, ivsize, icv_stashing,
62293a37 1335 areq->base.flags, encrypt);
4de9d0b5
LN
1336}
1337
56af8cd4 1338static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1339{
1340 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1341 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1342 struct talitos_edesc *edesc;
9c4a7965
KP
1343
1344 /* allocate extended descriptor */
62293a37 1345 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1346 if (IS_ERR(edesc))
1347 return PTR_ERR(edesc);
1348
1349 /* set encrypt */
70bcaca7 1350 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1351
aeb4c132 1352 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
9c4a7965
KP
1353}
1354
/*
 * AEAD decrypt entry point.  Uses the h/w integrity check when the device
 * supports it for this request shape; otherwise stashes the incoming ICV
 * and compares it in software on completion.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the trailing authsize bytes are the ICV, not ciphertext */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1403
4de9d0b5
LN
1404static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1405 const u8 *key, unsigned int keylen)
1406{
1407 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1408
1409 memcpy(&ctx->key, key, keylen);
1410 ctx->keylen = keylen;
1411
1412 return 0;
4de9d0b5
LN
1413}
1414
032d197e
LC
1415static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1416 struct scatterlist *dst, unsigned int len,
1417 struct talitos_edesc *edesc)
1418{
6f65f6ac
LC
1419 struct talitos_private *priv = dev_get_drvdata(dev);
1420 bool is_sec1 = has_ftr_sec1(priv);
1421
1422 if (is_sec1) {
1423 if (!edesc->src_nents) {
1424 dma_unmap_sg(dev, src, 1,
1425 dst != src ? DMA_TO_DEVICE
1426 : DMA_BIDIRECTIONAL);
1427 }
1428 if (dst && edesc->dst_nents) {
1429 dma_sync_single_for_device(dev,
1430 edesc->dma_link_tbl + len,
1431 len, DMA_FROM_DEVICE);
1432 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1433 edesc->buf + len, len);
1434 } else if (dst && dst != src) {
1435 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1436 }
1437 } else {
1438 talitos_sg_unmap(dev, edesc, src, dst);
1439 }
032d197e
LC
1440}
1441
4de9d0b5
LN
/* undo every DMA mapping set up by common_nonsnoop() */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	/* ptr[5]: iv out */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	/* ptr[2]: cipher key, ptr[1]: cipher iv */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1456
1457static void ablkcipher_done(struct device *dev,
1458 struct talitos_desc *desc, void *context,
1459 int err)
1460{
1461 struct ablkcipher_request *areq = context;
19bbbc63
KP
1462 struct talitos_edesc *edesc;
1463
1464 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1465
1466 common_nonsnoop_unmap(dev, edesc, areq);
1467
1468 kfree(edesc);
1469
1470 areq->base.complete(&areq->base, err);
1471}
1472
032d197e
LC
1473int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1474 unsigned int len, struct talitos_edesc *edesc,
1475 enum dma_data_direction dir, struct talitos_ptr *ptr)
1476{
1477 int sg_count;
922f9dc8
LC
1478 struct talitos_private *priv = dev_get_drvdata(dev);
1479 bool is_sec1 = has_ftr_sec1(priv);
032d197e 1480
922f9dc8 1481 to_talitos_ptr_len(ptr, len, is_sec1);
032d197e 1482
6f65f6ac
LC
1483 if (is_sec1) {
1484 sg_count = edesc->src_nents ? : 1;
032d197e 1485
6f65f6ac
LC
1486 if (sg_count == 1) {
1487 dma_map_sg(dev, src, 1, dir);
1488 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e 1489 } else {
6f65f6ac
LC
1490 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1491 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1492 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1493 len, DMA_TO_DEVICE);
1494 }
1495 } else {
1496 to_talitos_ptr_extent_clear(ptr, is_sec1);
1497
b8a011d4 1498 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
6f65f6ac
LC
1499
1500 if (sg_count == 1) {
922f9dc8 1501 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
6f65f6ac
LC
1502 } else {
1503 sg_count = sg_to_link_tbl(src, sg_count, len,
1504 &edesc->link_tbl[0]);
1505 if (sg_count > 1) {
1506 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1507 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1508 dma_sync_single_for_device(dev,
1509 edesc->dma_link_tbl,
1510 edesc->dma_len,
1511 DMA_BIDIRECTIONAL);
1512 } else {
1513 /* Only one segment now, so no link tbl needed*/
1514 to_talitos_ptr(ptr, sg_dma_address(src),
1515 is_sec1);
1516 }
032d197e
LC
1517 }
1518 }
1519 return sg_count;
1520}
1521
1522void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1523 unsigned int len, struct talitos_edesc *edesc,
1524 enum dma_data_direction dir,
1525 struct talitos_ptr *ptr, int sg_count)
1526{
922f9dc8
LC
1527 struct talitos_private *priv = dev_get_drvdata(dev);
1528 bool is_sec1 = has_ftr_sec1(priv);
1529
032d197e 1530 if (dir != DMA_NONE)
b8a011d4 1531 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
032d197e 1532
6f65f6ac
LC
1533 to_talitos_ptr_len(ptr, len, is_sec1);
1534
1535 if (is_sec1) {
1536 if (sg_count == 1) {
1537 if (dir != DMA_NONE)
1538 dma_map_sg(dev, dst, 1, dir);
1539 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1540 } else {
1541 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1542 dma_sync_single_for_device(dev,
1543 edesc->dma_link_tbl + len,
1544 len, DMA_FROM_DEVICE);
1545 }
032d197e 1546 } else {
6f65f6ac
LC
1547 to_talitos_ptr_extent_clear(ptr, is_sec1);
1548
1549 if (sg_count == 1) {
1550 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1551 } else {
1552 struct talitos_ptr *link_tbl_ptr =
1553 &edesc->link_tbl[edesc->src_nents + 1];
1554
1555 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1556 (edesc->src_nents + 1) *
1557 sizeof(struct talitos_ptr), 0);
1558 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
42e8b0d7 1559 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
6f65f6ac
LC
1560 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1561 edesc->dma_len,
1562 DMA_BIDIRECTIONAL);
1563 }
032d197e
LC
1564 }
1565}
1566
4de9d0b5
LN
/* fill in and submit an ablkcipher descriptor (iv, key, data in/out, iv out) */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out (in-place src was already mapped, hence DMA_NONE) */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous failure: callback will not run, clean up here */
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1623
e938e465 1624static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1625 areq, bool encrypt)
4de9d0b5
LN
1626{
1627 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1628 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1629 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1630
aeb4c132 1631 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1632 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1633 areq->base.flags, encrypt);
4de9d0b5
LN
1634}
1635
1636static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1637{
1638 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1639 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1640 struct talitos_edesc *edesc;
1641
1642 /* allocate extended descriptor */
62293a37 1643 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1644 if (IS_ERR(edesc))
1645 return PTR_ERR(edesc);
1646
1647 /* set encrypt */
1648 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1649
febec542 1650 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1651}
1652
1653static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1654{
1655 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1656 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1657 struct talitos_edesc *edesc;
1658
1659 /* allocate extended descriptor */
62293a37 1660 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1661 if (IS_ERR(edesc))
1662 return PTR_ERR(edesc);
1663
1664 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1665
febec542 1666 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1667}
1668
497f2e6b
LN
/* undo every DMA mapping set up by common_nonsnoop_hash() */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* ptr[5]: hash result or hash context out */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* ptr[2] carries the HMAC key only when one was set */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1695
1696static void ahash_done(struct device *dev,
1697 struct talitos_desc *desc, void *context,
1698 int err)
1699{
1700 struct ahash_request *areq = context;
1701 struct talitos_edesc *edesc =
1702 container_of(desc, struct talitos_edesc, desc);
1703 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1704
1705 if (!req_ctx->last && req_ctx->to_hash_later) {
1706 /* Position any partial block for next update/final/finup */
1707 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1708 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1709 }
1710 common_nonsnoop_hash_unmap(dev, edesc, areq);
1711
1712 kfree(edesc);
1713
1714 areq->base.complete(&areq->base, err);
1715}
1716
2d02905e
LC
1717/*
1718 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1719 * ourself and submit a padded block
1720 */
1721void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1722 struct talitos_edesc *edesc,
1723 struct talitos_ptr *ptr)
1724{
1725 static u8 padded_hash[64] = {
1726 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1727 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1728 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1729 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1730 };
1731
1732 pr_err_once("Bug in SEC1, padding ourself\n");
1733 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1734 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1735 (char *)padded_hash, DMA_TO_DEVICE);
1736}
1737
497f2e6b
LN
/* fill in and submit an ahash descriptor */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in (not the first op, or s/w-initialized context) */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 can't hash a zero-length message; substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous failure: callback will not run, clean up here */
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1808
1809static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1810 unsigned int nbytes)
1811{
1812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1813 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1814 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1815
aeb4c132 1816 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1817 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1818}
1819
1820static int ahash_init(struct ahash_request *areq)
1821{
1822 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1823 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1824
1825 /* Initialize the context */
5e833bc4 1826 req_ctx->nbuf = 0;
60f208d7
KP
1827 req_ctx->first = 1; /* first indicates h/w must init its context */
1828 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1829 req_ctx->hw_context_size =
1830 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1831 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1832 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1833
1834 return 0;
1835}
1836
60f208d7
KP
1837/*
1838 * on h/w without explicit sha224 support, we initialize h/w context
1839 * manually with sha224 constants, and tell it to run sha256.
1840 */
1841static int ahash_init_sha224_swinit(struct ahash_request *areq)
1842{
1843 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1844
1845 ahash_init(areq);
1846 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1847
a752447a
KP
1848 req_ctx->hw_context[0] = SHA224_H0;
1849 req_ctx->hw_context[1] = SHA224_H1;
1850 req_ctx->hw_context[2] = SHA224_H2;
1851 req_ctx->hw_context[3] = SHA224_H3;
1852 req_ctx->hw_context[4] = SHA224_H4;
1853 req_ctx->hw_context[5] = SHA224_H5;
1854 req_ctx->hw_context[6] = SHA224_H6;
1855 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1856
1857 /* init 64-bit count */
1858 req_ctx->hw_context[8] = 0;
1859 req_ctx->hw_context[9] = 0;
1860
1861 return 0;
1862}
1863
497f2e6b
LN
/*
 * Core buffering/dispatch path shared by update/final/finup/digest.
 *
 * @nbytes new bytes are available in areq->src.  Anything that does not
 * fill a whole hardware block is carried over in the request context
 * (buf/bufnext) until more data arrives or req_ctx->last is set; full
 * blocks are submitted to the SEC via an extended descriptor.
 *
 * Returns 0 when the data was merely buffered, -EINPROGRESS when a
 * descriptor was submitted, or a negative errno on failure.
 *
 * NOTE(review): the partial-block math (& (blocksize - 1)) assumes
 * blocksize is a power of two — true for all digests registered by
 * this driver, but worth confirming if new ones are added.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		/* nothing submitted; wait for more data or final */
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		/* stash the tail of src for the next invocation */
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1955
1956static int ahash_update(struct ahash_request *areq)
1957{
1958 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1959
1960 req_ctx->last = 0;
1961
1962 return ahash_process_req(areq, areq->nbytes);
1963}
1964
1965static int ahash_final(struct ahash_request *areq)
1966{
1967 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1968
1969 req_ctx->last = 1;
1970
1971 return ahash_process_req(areq, 0);
1972}
1973
1974static int ahash_finup(struct ahash_request *areq)
1975{
1976 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1977
1978 req_ctx->last = 1;
1979
1980 return ahash_process_req(areq, areq->nbytes);
1981}
1982
1983static int ahash_digest(struct ahash_request *areq)
1984{
1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1986 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1987
60f208d7 1988 ahash->init(areq);
497f2e6b
LN
1989 req_ctx->last = 1;
1990
1991 return ahash_process_req(areq, areq->nbytes);
1992}
1993
3639ca84
HG
1994static int ahash_export(struct ahash_request *areq, void *out)
1995{
1996 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1997 struct talitos_export_state *export = out;
1998
1999 memcpy(export->hw_context, req_ctx->hw_context,
2000 req_ctx->hw_context_size);
2001 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2002 export->swinit = req_ctx->swinit;
2003 export->first = req_ctx->first;
2004 export->last = req_ctx->last;
2005 export->to_hash_later = req_ctx->to_hash_later;
2006 export->nbuf = req_ctx->nbuf;
2007
2008 return 0;
2009}
2010
2011static int ahash_import(struct ahash_request *areq, const void *in)
2012{
2013 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2014 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2015 const struct talitos_export_state *export = in;
2016
2017 memset(req_ctx, 0, sizeof(*req_ctx));
2018 req_ctx->hw_context_size =
2019 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2020 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2021 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2022 memcpy(req_ctx->hw_context, export->hw_context,
2023 req_ctx->hw_context_size);
2024 memcpy(req_ctx->buf, export->buf, export->nbuf);
2025 req_ctx->swinit = export->swinit;
2026 req_ctx->first = export->first;
2027 req_ctx->last = export->last;
2028 req_ctx->to_hash_later = export->to_hash_later;
2029 req_ctx->nbuf = export->nbuf;
2030
2031 return 0;
2032}
2033
79b3a418
LN
/*
 * Bookkeeping for the synchronous digest done in keyhash(): the async
 * completion callback records the result code in @err and signals
 * @completion so the waiter can proceed.
 */
struct keyhash_result {
	struct completion completion;	/* signalled when the digest is done */
	int err;			/* final status from the callback */
};
2038
2039static void keyhash_complete(struct crypto_async_request *req, int err)
2040{
2041 struct keyhash_result *res = req->data;
2042
2043 if (err == -EINPROGRESS)
2044 return;
2045
2046 res->err = err;
2047 complete(&res->completion);
2048}
2049
2050static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2051 u8 *hash)
2052{
2053 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2054
2055 struct scatterlist sg[1];
2056 struct ahash_request *req;
2057 struct keyhash_result hresult;
2058 int ret;
2059
2060 init_completion(&hresult.completion);
2061
2062 req = ahash_request_alloc(tfm, GFP_KERNEL);
2063 if (!req)
2064 return -ENOMEM;
2065
2066 /* Keep tfm keylen == 0 during hash of the long key */
2067 ctx->keylen = 0;
2068 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2069 keyhash_complete, &hresult);
2070
2071 sg_init_one(&sg[0], key, keylen);
2072
2073 ahash_request_set_crypt(req, sg, hash, keylen);
2074 ret = crypto_ahash_digest(req);
2075 switch (ret) {
2076 case 0:
2077 break;
2078 case -EINPROGRESS:
2079 case -EBUSY:
2080 ret = wait_for_completion_interruptible(
2081 &hresult.completion);
2082 if (!ret)
2083 ret = hresult.err;
2084 break;
2085 default:
2086 break;
2087 }
2088 ahash_request_free(req);
2089
2090 return ret;
2091}
2092
2093static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2094 unsigned int keylen)
2095{
2096 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2097 unsigned int blocksize =
2098 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2099 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2100 unsigned int keysize = keylen;
2101 u8 hash[SHA512_DIGEST_SIZE];
2102 int ret;
2103
2104 if (keylen <= blocksize)
2105 memcpy(ctx->key, key, keysize);
2106 else {
2107 /* Must get the hash of the long key */
2108 ret = keyhash(tfm, key, keylen, hash);
2109
2110 if (ret) {
2111 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2112 return -EINVAL;
2113 }
2114
2115 keysize = digestsize;
2116 memcpy(ctx->key, hash, digestsize);
2117 }
2118
2119 ctx->keylen = keysize;
2120
2121 return 0;
2122}
2123
2124
9c4a7965 2125struct talitos_alg_template {
d5e4aaef
LN
2126 u32 type;
2127 union {
2128 struct crypto_alg crypto;
acbf7c62 2129 struct ahash_alg hash;
aeb4c132 2130 struct aead_alg aead;
d5e4aaef 2131 } alg;
9c4a7965
KP
2132 __be32 desc_hdr_template;
2133};
2134
/*
 * Table of every algorithm this driver may register, filtered at probe
 * time by hw_supports().  MDEUA handles md5/sha1/sha224/sha256; MDEUB
 * handles sha384/sha512.
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			/* NOTE(review): ECB needs no IV; ivsize kept as
			 * in the original table -- confirm intent */
			.ivsize = AES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CTR,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "ecb-des-talitos",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "cbc-des-talitos",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "md5",
			.cra_driver_name = "md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha224",
			.cra_driver_name = "sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "hmac-md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "hmac-sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha224)",
			.cra_driver_name = "hmac-sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "hmac-sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha384)",
			.cra_driver_name = "hmac-sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha512)",
			.cra_driver_name = "hmac-sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
2701
/*
 * One registered algorithm instance: its entry on the driver's
 * priv->alg_list, the device it was registered against, and a private
 * copy of the template it was built from.
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* linkage on priv->alg_list */
	struct device *dev;		/* owning SEC device */
	struct talitos_alg_template algt;	/* per-instance template copy */
};
2707
89d124cb
JE
2708static int talitos_init_common(struct talitos_ctx *ctx,
2709 struct talitos_crypto_alg *talitos_alg)
9c4a7965 2710{
5228f0f7 2711 struct talitos_private *priv;
9c4a7965
KP
2712
2713 /* update context with ptr to dev */
2714 ctx->dev = talitos_alg->dev;
19bbbc63 2715
5228f0f7
KP
2716 /* assign SEC channel to tfm in round-robin fashion */
2717 priv = dev_get_drvdata(ctx->dev);
2718 ctx->ch = atomic_inc_return(&priv->last_chan) &
2719 (priv->num_channels - 1);
2720
9c4a7965 2721 /* copy descriptor header template value */
acbf7c62 2722 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2723
602dba5a
KP
2724 /* select done notification */
2725 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2726
497f2e6b
LN
2727 return 0;
2728}
2729
89d124cb
JE
2730static int talitos_cra_init(struct crypto_tfm *tfm)
2731{
2732 struct crypto_alg *alg = tfm->__crt_alg;
2733 struct talitos_crypto_alg *talitos_alg;
2734 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2735
2736 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2737 talitos_alg = container_of(__crypto_ahash_alg(alg),
2738 struct talitos_crypto_alg,
2739 algt.alg.hash);
2740 else
2741 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2742 algt.alg.crypto);
2743
2744 return talitos_init_common(ctx, talitos_alg);
2745}
2746
aeb4c132 2747static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2748{
89d124cb
JE
2749 struct aead_alg *alg = crypto_aead_alg(tfm);
2750 struct talitos_crypto_alg *talitos_alg;
2751 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2752
2753 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2754 algt.alg.aead);
2755
2756 return talitos_init_common(ctx, talitos_alg);
9c4a7965
KP
2757}
2758
497f2e6b
LN
2759static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2760{
2761 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2762
2763 talitos_cra_init(tfm);
2764
2765 ctx->keylen = 0;
2766 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2767 sizeof(struct talitos_ahash_req_ctx));
2768
2769 return 0;
2770}
2771
9c4a7965
KP
2772/*
2773 * given the alg's descriptor header template, determine whether descriptor
2774 * type and primary/secondary execution units required match the hw
2775 * capabilities description provided in the device tree node.
2776 */
2777static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2778{
2779 struct talitos_private *priv = dev_get_drvdata(dev);
2780 int ret;
2781
2782 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2783 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2784
2785 if (SECONDARY_EU(desc_hdr_template))
2786 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2787 & priv->exec_units);
2788
2789 return ret;
2790}
2791
2dc11581 2792static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2793{
2794 struct device *dev = &ofdev->dev;
2795 struct talitos_private *priv = dev_get_drvdata(dev);
2796 struct talitos_crypto_alg *t_alg, *n;
2797 int i;
2798
2799 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2800 switch (t_alg->algt.type) {
2801 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62 2802 break;
aeb4c132
HX
2803 case CRYPTO_ALG_TYPE_AEAD:
2804 crypto_unregister_aead(&t_alg->algt.alg.aead);
acbf7c62
LN
2805 case CRYPTO_ALG_TYPE_AHASH:
2806 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2807 break;
2808 }
9c4a7965
KP
2809 list_del(&t_alg->entry);
2810 kfree(t_alg);
2811 }
2812
2813 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2814 talitos_unregister_rng(dev);
2815
35a3bb3d 2816 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2817 kfree(priv->chan[i].fifo);
9c4a7965 2818
4b992628 2819 kfree(priv->chan);
9c4a7965 2820
c3e337f8 2821 for (i = 0; i < 2; i++)
2cdba3cf 2822 if (priv->irq[i]) {
c3e337f8
KP
2823 free_irq(priv->irq[i], dev);
2824 irq_dispose_mapping(priv->irq[i]);
2825 }
9c4a7965 2826
c3e337f8 2827 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2828 if (priv->irq[1])
c3e337f8 2829 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2830
2831 iounmap(priv->reg);
2832
9c4a7965
KP
2833 kfree(priv);
2834
2835 return 0;
2836}
2837
/*
 * talitos_alg_alloc() - build a driver alg instance from a template.
 *
 * Copies @template into a freshly allocated struct talitos_crypto_alg and
 * wires up the per-type operations (ablkcipher / aead / ahash) on the
 * appropriate member of the algt.alg union.  @alg always ends up pointing
 * at the embedded struct crypto_alg of whichever union member is active,
 * so the common cra_* fields can be filled in once after the switch.
 *
 * Returns the new instance, or ERR_PTR(-ENOMEM) on allocation failure,
 * ERR_PTR(-ENOTSUPP) for HMAC algs on hardware without HMAC support, or
 * ERR_PTR(-EINVAL) for an unknown template type.  Ownership of the
 * returned object passes to the caller (talitos_probe frees it on
 * registration failure, talitos_remove frees it on teardown).
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	/* struct copy of the whole template, including desc_hdr_template */
	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		/* new-style AEAD: ops live on algt.alg.aead, not cra_* */
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		/* all hmac(*) template names start with "hmac" */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * Older SEC units cannot hw-init SHA-224 state: fall back
		 * to a software-initialized init op and run the hash on the
		 * SHA-256 execution unit (SHA-224 is truncated SHA-256).
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	/* common crypto_alg fields, shared by all three alg flavours */
	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
2913
c3e337f8
KP
2914static int talitos_probe_irq(struct platform_device *ofdev)
2915{
2916 struct device *dev = &ofdev->dev;
2917 struct device_node *np = ofdev->dev.of_node;
2918 struct talitos_private *priv = dev_get_drvdata(dev);
2919 int err;
dd3c0987 2920 bool is_sec1 = has_ftr_sec1(priv);
c3e337f8
KP
2921
2922 priv->irq[0] = irq_of_parse_and_map(np, 0);
2cdba3cf 2923 if (!priv->irq[0]) {
c3e337f8
KP
2924 dev_err(dev, "failed to map irq\n");
2925 return -EINVAL;
2926 }
dd3c0987
LC
2927 if (is_sec1) {
2928 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2929 dev_driver_string(dev), dev);
2930 goto primary_out;
2931 }
c3e337f8
KP
2932
2933 priv->irq[1] = irq_of_parse_and_map(np, 1);
2934
2935 /* get the primary irq line */
2cdba3cf 2936 if (!priv->irq[1]) {
dd3c0987 2937 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
c3e337f8
KP
2938 dev_driver_string(dev), dev);
2939 goto primary_out;
2940 }
2941
dd3c0987 2942 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
c3e337f8
KP
2943 dev_driver_string(dev), dev);
2944 if (err)
2945 goto primary_out;
2946
2947 /* get the secondary irq line */
dd3c0987 2948 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
c3e337f8
KP
2949 dev_driver_string(dev), dev);
2950 if (err) {
2951 dev_err(dev, "failed to request secondary irq\n");
2952 irq_dispose_mapping(priv->irq[1]);
2cdba3cf 2953 priv->irq[1] = 0;
c3e337f8
KP
2954 }
2955
2956 return err;
2957
2958primary_out:
2959 if (err) {
2960 dev_err(dev, "failed to request primary irq\n");
2961 irq_dispose_mapping(priv->irq[0]);
2cdba3cf 2962 priv->irq[0] = 0;
c3e337f8
KP
2963 }
2964
2965 return err;
2966}
2967
/*
 * talitos_probe() - bring up one SEC controller instance.
 *
 * Sequence: allocate and register driver state, map the register window,
 * read capability properties from the device tree, select the per-version
 * register layout and channel stride, set up IRQs and completion tasklets,
 * allocate per-channel request fifos, reset/init the hardware, then
 * register the hwrng (if present) and every supported crypto algorithm.
 *
 * Any failure jumps to err_out, which delegates the unwind to
 * talitos_remove(); every structure is therefore allocated zeroed so the
 * unwind can tell what has been initialized.
 *
 * Returns 0 on success or a negative errno.
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	/* published early: talitos_probe_irq()/talitos_remove() read it back */
	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* all four properties are mandatory; num_channels must be 2^n */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/*
	 * Execution-unit register offsets and the channel stride differ
	 * between SEC1.2, SEC1.0 and SEC2+; pick the matching layout.
	 */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* completion tasklets mirror the IRQ topology chosen above */
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		/* channel register banks start one stride past the globals */
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/*
		 * With a shared IRQ, and for even channels on dual-IRQ
		 * parts, the channel registers sit at an extra offset —
		 * NOTE(review): exact hardware rationale not visible here.
		 */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/* counts up from -(chfifo_len - 1); 0 means fifo full */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* 36-bit DMA addressing, per SEC hardware capabilities */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP = alg not usable on this part */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			/* alg is only used for the failure message below */
			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	/* talitos_remove() copes with partially-initialized priv */
	talitos_remove(ofdev);

	return err;
}
3176
/*
 * Device-tree match table.  Only the SEC generations actually enabled in
 * the kernel config are matched; "fsl,sec1.2"/"fsl,sec2.1"/"fsl,sec3.0"
 * nodes are expected to also carry the base compatible strings below.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3191
/* platform driver glue: binds talitos_probe/remove to matching DT nodes */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};
3200
/* expands to module init/exit that register/unregister talitos_driver */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");