crypto: sahara - use the backlog
drivers/crypto/sahara.c
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3 3
#define SAHARA_VERSION_4 4
#define SAHARA_TIMEOUT_MS 1000
#define SAHARA_MAX_HW_DESC 2
#define SAHARA_MAX_HW_LINK 20

#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_NEW_KEY BIT(3)

#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
#define SAHARA_HDR_FORM_DATA (5 << 16)
#define SAHARA_HDR_FORM_KEY (8 << 16)
#define SAHARA_HDR_LLO (1 << 24)
#define SAHARA_HDR_CHA_SKHA (1 << 28)
#define SAHARA_HDR_CHA_MDHA (2 << 28)
#define SAHARA_HDR_PARITY_BIT (1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
#define SAHARA_HDR_MDHA_HASH 0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1 0
#define SAHARA_HDR_MDHA_ALG_MD5 1
#define SAHARA_HDR_MDHA_ALG_SHA256 2
#define SAHARA_HDR_MDHA_ALG_SHA224 3
#define SAHARA_HDR_MDHA_PDATA (1 << 2)
#define SAHARA_HDR_MDHA_HMAC (1 << 3)
#define SAHARA_HDR_MDHA_INIT (1 << 5)
#define SAHARA_HDR_MDHA_IPAD (1 << 6)
#define SAHARA_HDR_MDHA_OPAD (1 << 7)
#define SAHARA_HDR_MDHA_SWAP (1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
#define SAHARA_HDR_MDHA_SSL (1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH 1

#define SAHARA_REG_VERSION 0x00
#define SAHARA_REG_DAR 0x04
#define SAHARA_REG_CONTROL 0x08
#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
#define SAHARA_REG_CMD 0x0C
#define SAHARA_CMD_RESET (1 << 0)
#define SAHARA_CMD_CLEAR_INT (1 << 8)
#define SAHARA_CMD_CLEAR_ERR (1 << 9)
#define SAHARA_CMD_SINGLE_STEP (1 << 10)
#define SAHARA_CMD_MODE_BATCH (1 << 16)
#define SAHARA_CMD_MODE_DEBUG (1 << 18)
#define SAHARA_REG_STATUS 0x10
#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
#define SAHARA_STATE_IDLE 0
#define SAHARA_STATE_BUSY 1
#define SAHARA_STATE_ERR 2
#define SAHARA_STATE_FAULT 3
#define SAHARA_STATE_COMPLETE 4
#define SAHARA_STATE_COMP_FLAG (1 << 2)
#define SAHARA_STATUS_DAR_FULL (1 << 3)
#define SAHARA_STATUS_ERROR (1 << 4)
#define SAHARA_STATUS_SECURE (1 << 5)
#define SAHARA_STATUS_FAIL (1 << 6)
#define SAHARA_STATUS_INIT (1 << 7)
#define SAHARA_STATUS_RNG_RESEED (1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
#define SAHARA_STATUS_MODE_BATCH (1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS 0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA 14
#define SAHARA_ERRSOURCE_DMA 15
#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR 0x18
#define SAHARA_REG_CDAR 0x1C
#define SAHARA_REG_IDAR 0x20

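/*
 * In-memory layout of the descriptors and link-table entries that the SAHARA
 * DMA engine walks; they live in DMA-coherent memory and are handed to the
 * hardware through the DAR register.
 */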
struct sahara_hw_desc {
        u32 hdr;
        u32 len1;
        dma_addr_t p1;
        u32 len2;
        dma_addr_t p2;
        dma_addr_t next;
};

struct sahara_hw_link {
        u32 len;
        dma_addr_t p;
        dma_addr_t next;
};

struct sahara_ctx {
        unsigned long flags;

        /* AES-specific context */
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_ablkcipher *fallback;

        /* SHA-specific context */
        struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
        u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8 context[SHA256_DIGEST_SIZE + 4];
        struct mutex mutex;
        unsigned int mode;
        unsigned int digest_size;
        unsigned int context_size;
        unsigned int buf_cnt;
        unsigned int sg_in_idx;
        struct scatterlist *in_sg;
        struct scatterlist in_sg_chain[2];
        bool in_sg_chained;
        size_t total;
        unsigned int last;
        unsigned int first;
        unsigned int active;
};

struct sahara_dev {
        struct device *device;
        unsigned int version;
        void __iomem *regs_base;
        struct clk *clk_ipg;
        struct clk *clk_ahb;
        struct mutex queue_mutex;
        struct task_struct *kthread;
        struct completion dma_completion;

        struct sahara_ctx *ctx;
        spinlock_t lock;
        struct crypto_queue queue;
        unsigned long flags;

        struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8 *key_base;
        dma_addr_t key_phys_base;

        u8 *iv_base;
        dma_addr_t iv_phys_base;

        u8 *context_base;
        dma_addr_t context_phys_base;

        struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];

        size_t total;
        struct scatterlist *in_sg;
        unsigned int nb_in_sg;
        struct scatterlist *out_sg;
        unsigned int nb_out_sg;

        u32 error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}

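/*
 * Descriptor headers appear to require odd parity: the base key-header
 * pattern already has an odd number of bits set, so each extra mode bit
 * (CBC, encrypt) toggles SAHARA_HDR_PARITY_BIT to keep the overall parity
 * odd. sahara_sha_init_hdr() below computes the same property explicitly
 * with hweight_long().
 */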
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
               SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

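/*
 * Count how many scatterlist entries are needed to cover 'total' bytes; the
 * result is used to size the hardware link table.
 */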
static int sahara_sg_length(struct scatterlist *sg,
                            unsigned int total)
{
        int sg_nb;
        unsigned int len;
        struct scatterlist *sg_list;

        sg_nb = 0;
        sg_list = sg;

        while (total) {
                len = min(sg_list->length, total);

                sg_nb++;
                total -= len;

                sg_list = sg_next(sg_list);
                if (!sg_list)
                        total = 0;
        }

        return sg_nb;
}

static char *sahara_err_src[16] = {
        "No error",
        "Header error",
        "Descriptor length error",
        "Descriptor length or pointer error",
        "Link length error",
        "Link pointer error",
        "Input buffer error",
        "Output buffer error",
        "Output buffer starvation",
        "Internal state fault",
        "General descriptor problem",
        "Reserved",
        "Descriptor address error",
        "Link address error",
        "CHA error",
        "DMA error"
};

static char *sahara_err_dmasize[4] = {
        "Byte transfer",
        "Half-word transfer",
        "Word transfer",
        "Reserved"
};

static char *sahara_err_dmasrc[8] = {
        "No error",
        "AHB bus error",
        "Internal IP bus error",
        "Parity error",
        "DMA crosses 256 byte boundary",
        "DMA is busy",
        "Reserved",
        "DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
        "Input buffer non-empty",
        "Illegal address",
        "Illegal mode",
        "Illegal data size",
        "Illegal key size",
        "Write during processing",
        "CTX read during processing",
        "HW error",
        "Input buffer disabled/underflow",
        "Output buffer disabled/overflow",
        "DES key parity error",
        "Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, " * DMA read.\n");
                else
                        dev_err(dev->device, " * DMA write.\n");

                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!IS_ENABLED(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, " - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, " * %s.\n",
                sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, " - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, " - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, " - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, " - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, " - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, " - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, " - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, " - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, " - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, " - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, " - Debug Mode.\n");

        dev_dbg(dev->device, " - Internal state = 0x%02x\n",
                SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
                        i, dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
                        i, dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

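/*
 * AES transfers use a fixed two-descriptor chain: hw_desc[0] loads the key
 * (and, for CBC, the IV) into the SKHA unit, and hw_desc[1] describes the
 * data phase through two link tables, one for the input scatterlist and one
 * for the output scatterlist.
 */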
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;

                if (dev->flags & FLAGS_CBC) {
                        dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
                        dev->hw_desc[0]->p1 = dev->iv_phys_base;
                } else {
                        dev->hw_desc[0]->len1 = 0;
                        dev->hw_desc[0]->p1 = 0;
                }
                dev->hw_desc[0]->len2 = ctx->keylen;
                dev->hw_desc[0]->p2 = dev->key_phys_base;
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
        }
        dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

        dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
        dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                goto unmap_in;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_out;
        }

        /* Create input links */
        dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[1]->len1 = dev->total;
        dev->hw_desc[1]->len2 = dev->total;
        dev->hw_desc[1]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_out:
        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        int ret;
        unsigned long timeout;

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->nbytes, req->src, req->dst);

        /* assign new request to device */
        dev->total = req->nbytes;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->info)
                memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

        /* assign new context to device */
        dev->ctx = ctx;

        reinit_completion(&dev->dma_completion);

        ret = sahara_hw_descriptor_create(dev);
        if (ret)
                return -EINVAL;

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "AES timeout\n");
                return -ETIMEDOUT;
        }

        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        ctx->keylen = keylen;

        /* SAHARA only supports 128bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_128 &&
            keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->fallback->base.crt_flags |=
                (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
        if (ret) {
                struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

                tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm_aux->crt_flags |=
                        (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }

        rctx->mode = mode;

        mutex_lock(&dev->queue_mutex);
        err = ablkcipher_enqueue_request(&dev->queue, req);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->fallback = crypto_alloc_ablkcipher(name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

        return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback)
                crypto_free_ablkcipher(ctx->fallback);
        ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
                               struct sahara_sha_reqctx *rctx)
{
        u32 hdr = 0;

        hdr = rctx->mode;

        if (rctx->first) {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
                hdr |= SAHARA_HDR_MDHA_INIT;
        } else {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
        }

        if (rctx->last)
                hdr |= SAHARA_HDR_MDHA_PDATA;

        if (hweight_long(hdr) % 2 == 0)
                hdr |= SAHARA_HDR_PARITY_BIT;

        return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
                                      struct sahara_sha_reqctx *rctx,
                                      int start)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        dev->in_sg = rctx->in_sg;

        dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
        if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg);
                return -EINVAL;
        }

        if (rctx->in_sg_chained) {
                i = start;
                sg = dev->in_sg;
                while (sg) {
                        ret = dma_map_sg(dev->device, sg, 1,
                                         DMA_TO_DEVICE);
                        if (!ret)
                                return -EFAULT;

                        dev->hw_link[i]->len = sg->length;
                        dev->hw_link[i]->p = sg->dma_address;
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                        i += 1;
                }
                dev->hw_link[i-1]->next = 0;
        } else {
                sg = dev->in_sg;
                ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -EFAULT;

                for (i = start; i < dev->nb_in_sg + start; i++) {
                        dev->hw_link[i]->len = sg->length;
                        dev->hw_link[i]->p = sg->dma_address;
                        if (i == (dev->nb_in_sg + start - 1)) {
                                dev->hw_link[i]->next = 0;
                        } else {
                                dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                                sg = sg_next(sg);
                        }
                }
        }

        return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        unsigned result_len;
        int i = index;

        if (rctx->first)
                /* Create initial descriptor: #8 */
                dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
        else
                /* Create hash descriptor: #10. Must follow #6. */
                dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

        dev->hw_desc[index]->len1 = rctx->total;
        if (dev->hw_desc[index]->len1 == 0) {
                /* if len1 is 0, p1 must be 0, too */
                dev->hw_desc[index]->p1 = 0;
                rctx->sg_in_idx = 0;
        } else {
                /* Create input links */
                dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
                i = sahara_sha_hw_links_create(dev, rctx, index);

                rctx->sg_in_idx = index;
                if (i < 0)
                        return i;
        }

        dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

        /* Save the context for the next operation */
        result_len = rctx->context_size;
        dev->hw_link[i]->p = dev->context_phys_base;

        dev->hw_link[i]->len = result_len;
        dev->hw_desc[index]->len2 = result_len;

        dev->hw_link[i]->next = 0;

        return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
                                                   struct sahara_sha_reqctx *rctx,
                                                   struct ahash_request *req,
                                                   int index)
{
        dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

        dev->hw_desc[index]->len1 = rctx->context_size;
        dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
        dev->hw_desc[index]->len2 = 0;
        dev->hw_desc[index]->p2 = 0;

        dev->hw_link[index]->len = rctx->context_size;
        dev->hw_link[index]->p = dev->context_phys_base;
        dev->hw_link[index]->next = 0;

        return 0;
}

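/*
 * Shrink the scatterlist so that it covers exactly 'nbytes' bytes: the entry
 * that reaches 'nbytes' is truncated and marked as the new end of the list.
 */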
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
        if (!sg || !sg->length)
                return nbytes;

        while (nbytes && sg) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        break;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return nbytes;
}

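/*
 * Gather request data into block_size-aligned chunks: anything smaller than
 * a block is parked in rctx->buf for the next call, since only the last
 * transfer may be padded by the hardware. Returns 0 when everything was
 * buffered and -EINPROGRESS when a transfer has been prepared.
 */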
static int sahara_sha_prepare_request(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        unsigned int hash_later;
        unsigned int block_size;
        unsigned int len;

        block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        /* append bytes from previous operation */
        len = rctx->buf_cnt + req->nbytes;

        /* only the last transfer can be padded in hardware */
        if (!rctx->last && (len < block_size)) {
                /* too little data, save it for the next operation */
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
                                         0, req->nbytes, 0);
                rctx->buf_cnt += req->nbytes;

                return 0;
        }

        /* add data from previous operation first */
        if (rctx->buf_cnt)
                memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

        /* data must always be a multiple of block_size */
        hash_later = rctx->last ? 0 : len & (block_size - 1);
        if (hash_later) {
                unsigned int offset = req->nbytes - hash_later;
                /* Save remaining bytes for later use */
                scatterwalk_map_and_copy(rctx->buf, req->src, offset,
                                         hash_later, 0);
        }

        /* nbytes should now be multiple of blocksize */
        req->nbytes = req->nbytes - hash_later;

        sahara_walk_and_recalc(req->src, req->nbytes);

        /* have data from previous operation and current */
        if (rctx->buf_cnt && req->nbytes) {
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

                scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;

                rctx->in_sg_chained = true;
                req->src = rctx->in_sg_chain;
        /* only data from previous operation */
        } else if (rctx->buf_cnt) {
                if (req->src)
                        rctx->in_sg = req->src;
                else
                        rctx->in_sg = rctx->in_sg_chain;
                /* buf was copied into rembuf above */
                sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
                rctx->total = rctx->buf_cnt;
                rctx->in_sg_chained = false;
        /* no data from previous operation */
        } else {
                rctx->in_sg = req->src;
                rctx->total = req->nbytes;
                req->src = rctx->in_sg;
                rctx->in_sg_chained = false;
        }

        /* on next call, we only have the remaining data in the buffer */
        rctx->buf_cnt = hash_later;

        return -EINPROGRESS;
}

static void sahara_sha_unmap_sg(struct sahara_dev *dev,
                                struct sahara_sha_reqctx *rctx)
{
        struct scatterlist *sg;

        if (rctx->in_sg_chained) {
                sg = dev->in_sg;
                while (sg) {
                        dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
                        sg = sg_next(sg);
                }
        } else {
                dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                             DMA_TO_DEVICE);
        }
}

static int sahara_sha_process(struct ahash_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        int ret;
        unsigned long timeout;

        ret = sahara_sha_prepare_request(req);
        if (!ret)
                return ret;

        if (rctx->first) {
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = 0;
                rctx->first = 0;
        } else {
                memcpy(dev->context_base, rctx->context, rctx->context_size);

                sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
                dev->hw_desc[1]->next = 0;
        }

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        reinit_completion(&dev->dma_completion);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "SHA timeout\n");
                return -ETIMEDOUT;
        }

        if (rctx->sg_in_idx)
                sahara_sha_unmap_sg(dev, rctx);

        memcpy(rctx->context, dev->context_base, rctx->context_size);

        if (req->result)
                memcpy(req->result, rctx->context, rctx->digest_size);

        return 0;
}

static int sahara_queue_manage(void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;

        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                mutex_lock(&dev->queue_mutex);
                backlog = crypto_get_backlog(&dev->queue);
                async_req = crypto_dequeue_request(&dev->queue);
                mutex_unlock(&dev->queue_mutex);

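                /*
                 * A request that was backlogged by crypto_enqueue_request()
                 * is now guaranteed a slot, so tell its owner it is being
                 * processed before dispatching the next request.
                 */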
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (async_req) {
                        if (crypto_tfm_alg_type(async_req->tfm) ==
                            CRYPTO_ALG_TYPE_AHASH) {
                                struct ahash_request *req =
                                        ahash_request_cast(async_req);

                                ret = sahara_sha_process(req);
                        } else {
                                struct ablkcipher_request *req =
                                        ablkcipher_request_cast(async_req);

                                ret = sahara_aes_process(req);
                        }

                        async_req->complete(async_req, ret);

                        continue;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int ret;

        if (!req->nbytes && !last)
                return 0;

        mutex_lock(&rctx->mutex);
        rctx->last = last;

        if (!rctx->active) {
                rctx->active = 1;
                rctx->first = 1;
        }

        mutex_lock(&dev->queue_mutex);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);
        mutex_unlock(&rctx->mutex);

        return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
                rctx->digest_size = SHA1_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
                rctx->digest_size = SHA256_DIGEST_SIZE;
                break;
        default:
                return -EINVAL;
        }

        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;

        mutex_init(&rctx->mutex);

        return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
        req->nbytes = 0;
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
        sahara_sha_init(req);

        return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, ctx, sizeof(struct sahara_ctx));
        memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
               sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(ctx, in, sizeof(struct sahara_ctx));
        memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
               sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->shash_fallback = crypto_alloc_shash(name, 0,
                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->shash_fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->shash_fallback);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sahara_sha_reqctx) +
                                 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

        return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->shash_fallback);
        ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name = "ecb(aes)",
        .cra_driver_name = "sahara-ecb-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct sahara_ctx),
        .cra_alignmask = 0x0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = sahara_aes_cra_init,
        .cra_exit = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = sahara_aes_setkey,
                .encrypt = sahara_aes_ecb_encrypt,
                .decrypt = sahara_aes_ecb_decrypt,
        }
}, {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "sahara-cbc-aes",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                     CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct sahara_ctx),
        .cra_alignmask = 0x0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = sahara_aes_cra_init,
        .cra_exit = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .setkey = sahara_aes_setkey,
                .encrypt = sahara_aes_cbc_encrypt,
                .decrypt = sahara_aes_cbc_decrypt,
        }
}
};

static struct ahash_alg sha_v3_algs[] = {
{
        .init = sahara_sha_init,
        .update = sahara_sha_update,
        .final = sahara_sha_final,
        .finup = sahara_sha_finup,
        .digest = sahara_sha_digest,
        .export = sahara_sha_export,
        .import = sahara_sha_import,
        .halg.digestsize = SHA1_DIGEST_SIZE,
        .halg.base = {
                .cra_name = "sha1",
                .cra_driver_name = "sahara-sha1",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                             CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize = SHA1_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct sahara_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = sahara_sha_cra_init,
                .cra_exit = sahara_sha_cra_exit,
        }
},
};

static struct ahash_alg sha_v4_algs[] = {
{
        .init = sahara_sha_init,
        .update = sahara_sha_update,
        .final = sahara_sha_final,
        .finup = sahara_sha_finup,
        .digest = sahara_sha_digest,
        .export = sahara_sha_export,
        .import = sahara_sha_import,
        .halg.digestsize = SHA256_DIGEST_SIZE,
        .halg.base = {
                .cra_name = "sha256",
                .cra_driver_name = "sahara-sha256",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_TYPE_AHASH |
                             CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize = SHA256_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct sahara_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = sahara_sha_cra_init,
                .cra_exit = sahara_sha_cra_exit,
        }
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        complete(&dev->dma_completion);

        return IRQ_HANDLED;
}


static int sahara_register_algs(struct sahara_dev *dev)
{
        int err;
        unsigned int i, j, k, l;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                INIT_LIST_HEAD(&aes_algs[i].cra_list);
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
                err = crypto_register_ahash(&sha_v3_algs[k]);
                if (err)
                        goto err_sha_v3_algs;
        }

        if (dev->version > SAHARA_VERSION_3)
                for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
                        err = crypto_register_ahash(&sha_v4_algs[l]);
                        if (err)
                                goto err_sha_v4_algs;
                }

        return 0;

err_sha_v4_algs:
        for (j = 0; j < l; j++)
                crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
        for (j = 0; j < k; j++)
                crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
                crypto_unregister_ahash(&sha_v3_algs[i]);

        if (dev->version > SAHARA_VERSION_3)
                for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
                        crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        struct resource *res;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
        if (dev == NULL) {
                dev_err(&pdev->dev, "unable to alloc data struct.\n");
                return -ENOMEM;
        }

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq resource\n");
                return irq;
        }

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        /* clocks */
        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                               sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                           &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                err = -ENOMEM;
                goto err_key;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for context: largest digest + message length field */
        dev->context_base = dma_alloc_coherent(&pdev->dev,
                                        SHA256_DIGEST_SIZE + 4,
                                        &dev->context_phys_base, GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
                err = -ENOMEM;
                goto err_key;
        }

        /* Allocate space for HW links */
        dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                err = -ENOMEM;
                goto err_link;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                       sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        spin_lock_init(&dev->lock);
        mutex_init(&dev->queue_mutex);

        dev_ptr = dev;

        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread)) {
                err = PTR_ERR(dev->kthread);
                goto err_link;
        }

        init_completion(&dev->dma_completion);

        clk_prepare_enable(dev->clk_ipg);
        clk_prepare_enable(dev->clk_ahb);

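        /*
         * The version register layout differs between generations: SAHARA v3
         * (i.MX27) reports its version in the low byte, while v4 (i.MX53)
         * reports it in bits 15:8.
         */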
        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                        "fsl,imx53-sahara")) {
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                        version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                          SAHARA_CONTROL_SET_MAXBURST(8) |
                          SAHARA_CONTROL_RNG_AUTORSD |
                          SAHARA_CONTROL_ENABLE_INT,
                          SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
err_link:
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
        dma_free_coherent(&pdev->dev,
                          SHA256_DIGEST_SIZE + 4,
                          dev->context_base, dev->context_phys_base);
err_key:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        kthread_stop(dev->kthread);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe = sahara_probe,
        .remove = sahara_remove,
        .driver = {
                .name = SAHARA_NAME,
                .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");