/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR				0x00
#define HASH_DIN			0x04
#define HASH_STR			0x08
#define HASH_IMR			0x20
#define HASH_SR				0x24
#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
#define HASH_HWCFGR			0x3F0
#define HASH_VER			0x3F4
#define HASH_ID				0x3F8

/* Control Register */
#define HASH_CR_INIT			BIT(2)
#define HASH_CR_DMAE			BIT(3)
#define HASH_CR_DATATYPE_POS		4
#define HASH_CR_MODE			BIT(6)
#define HASH_CR_MDMAT			BIT(13)
#define HASH_CR_DMAA			BIT(14)
#define HASH_CR_LKEY			BIT(16)

#define HASH_CR_ALGO_SHA1		0x0
#define HASH_CR_ALGO_MD5		0x80
#define HASH_CR_ALGO_SHA224		0x40000
#define HASH_CR_ALGO_SHA256		0x40080

/* Interrupt */
#define HASH_DINIE			BIT(0)
#define HASH_DCIE			BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS	= 0x0,
	HASH_DATA_16_BITS	= 0x1,
	HASH_DATA_8_BITS	= 0x2,
	HASH_DATA_1_BIT		= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

struct stm32_hash_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	spinlock_t		lock; /* lock to protect queue */

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}

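/*
 * Program HASH_STR.NBLW with the number of valid bits, 8 * (length % 4),
 * in the last 32-bit word written to HASH_DIN.
 */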
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

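/*
 * Feed the HMAC key into HASH_DIN and start its processing with DCAL.
 * Returns -EINPROGRESS once a key has been submitted, 0 when no key is set.
 */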
static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

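/*
 * Program HASH_CR once per request: algorithm, input data type and, for
 * HMAC, the mode and long-key selection bits.
 */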
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

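/*
 * Buffer input data from the request scatterlist until the internal
 * buffer is full or the request data is exhausted.
 */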
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

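/*
 * CPU (polling) data path: feed the buffer word by word into HASH_DIN
 * and, for a final block, start the digest calculation with DCAL.
 */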
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    !(hdev->flags & HASH_FLAGS_HMAC_KEY)) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

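/*
 * Queue one scatterlist entry to the DMA engine and wait for its
 * completion. Returns -EINPROGRESS on success: the digest itself is
 * reported later through the interrupt path.
 */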
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
						       msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

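/*
 * Send the HMAC key either by CPU writes (short keys, or when the IP
 * requires it in dma_mode 1) or through an extra DMA transfer.
 */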
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

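/*
 * DMA data path: walk the request scatterlist and send each entry to
 * the hash block. When dma_mode == 1, the trailing non-16-byte-aligned
 * bytes of the last entry are written back by the CPU.
 */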
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp;
	unsigned int i;
	const u32 *buffer = (const u32 *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
			stm32_hash_write(hdev, HASH_DIN, buffer[i]);

		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

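/*
 * Decide whether a request may use the DMA path: large enough, word
 * aligned and compatible with the IP's scatter-gather limitations.
 */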
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);

	rctx->bufcnt = 0;

	return err;
}

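/* Read back the computed digest from the HASH_HREG registers (big endian). */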
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine,
				  struct ahash_request *req)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int ret;

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);

	if (rctx->flags & HASH_FLAGS_FINUP)
		return ret;

	return 0;
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be called even if update() failed, in order to
	 * clean up resources, except when update() returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

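/*
 * Save the hardware context (IMR, STR, CR and the context swap
 * registers) together with the software request context.
 */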
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
		cpu_relax();

	rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
				   GFP_KERNEL);
	if (!rctx->hw_context)
		return -ENOMEM;

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

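/*
 * Restore a previously exported state: reload the software context,
 * then write IMR, STR, CR and the context swap registers back.
 */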
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

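/*
 * Threaded IRQ handler: finalize the current request once the digest
 * is available (CPU mode) or the DMA transfer has completed.
 */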
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	int err = 0;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, err);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	for (; i--; ) {
		for (; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	const struct of_device_id *match;
	int err;

	match = of_match_device(stm32_hash_of_match, dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	err = of_property_read_u32(dev->of_node, "dma-maxburst",
				   &hdev->dma_maxburst);

	hdev->pdata = match->data;

	return err;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Cannot get IRQ resource\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		dev_err(dev, "failed to get clock for hash (%lu)\n",
			PTR_ERR(hdev->clk));
		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(hdev->rst)) {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	if (ret)
		dev_dbg(dev, "DMA mode not available\n");

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
	hdev->engine->hash_one_request = stm32_hash_one_request;

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");