/*
 * Multi buffer SHA512 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha512_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

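/*
 * How the multi-buffer scheme works, in short: instead of hashing one
 * message at a time, jobs from many ahash requests are packed into the
 * lanes of a single SIMD job manager, so the AVX2 code can advance
 * several messages in parallel.  Requests therefore complete out of
 * order and asynchronously.  If too few requests arrive to keep the
 * lanes full, a per-cpu flusher (sha512_mb_flusher() below) forces the
 * partially filled manager through after FLUSH_INTERVAL so a lone
 * request is not stalled indefinitely.
 */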
static struct mcryptd_alg_state sha512_mb_alg_state;

struct sha512_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

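/*
 * The sha512_hash_ctx handed to the job manager lives in the __ctx area
 * of an ahash_request, which is itself embedded in an
 * mcryptd_hash_request_ctx.  The two casts below rely on that layout to
 * walk between the inner hash context and the outer request context
 * with container_of().
 */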
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
					(struct sha512_mb_mgr *state,
					 struct job_sha512 *job);
static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
					(struct sha512_mb_mgr *state);
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
					(struct sha512_mb_mgr *state);

inline void sha512_init_digest(uint64_t *digest)
{
	static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
					SHA512_H0, SHA512_H1, SHA512_H2,
					SHA512_H3, SHA512_H4, SHA512_H5,
					SHA512_H6, SHA512_H7 };
	memcpy(digest, initial_digest, sizeof(initial_digest));
}

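/*
 * Standard SHA-512 padding: append the 0x80 terminator after the data
 * already buffered in padblock, zero-fill, and store the message length
 * in bits as a big-endian integer in the last SHA512_PADLENGTHFIELD_SIZE
 * bytes of the final block.  Only the low 64 bits of the length field
 * can be non-zero here, since total_len is a 32-bit byte count.
 * Returns the number of extra blocks (one or two) that remain to be
 * hashed.
 */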
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
			   uint32_t total_len)
{
	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA512_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA512_PADLENGTHFIELD_SIZE;

#if SHA512_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA512_LOG2_BLOCK_SIZE;
}

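/*
 * Drive a context forward as far as possible: keep submitting whole
 * blocks from the user's buffer (spilling any tail into the extra-block
 * buffer) and, once HASH_LAST has been seen, pad and submit the final
 * block(s).  Returns the ctx when it has completed, returns it in the
 * IDLE state when it needs more user input, or returns NULL while its
 * job is still in flight inside the job manager.
 */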
static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
		(struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy the remainder to the extra blocks buffer.
			 */
			copy_len = len & (SHA512_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA512_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA512_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_submit(&mgr->mgr,
							      &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha512_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha512_hash_ctx
		*sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return
	 * to the user.  If it is not ready, resubmit the job to finish
	 * processing.
	 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned.  Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha512_hash_ctx *ctx;

	ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_get_comp_job(&mgr->mgr);
	return sha512_ctx_mgr_resubmit(mgr, ctx);
}

static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
{
	sha512_job_mgr_init(&mgr->mgr);
}

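/*
 * Queue up to 'len' bytes of 'buffer' for hashing in ctx.  flags must be
 * a combination of HASH_FIRST/HASH_UPDATE/HASH_LAST, i.e. a subset of
 * HASH_ENTIRE; on invalid flags or state the original ctx is returned
 * with ctx->error set.  Otherwise the return value is whichever context
 * the job manager finished as a side effect of this submission, which is
 * not necessarily the one passed in; NULL means nothing has completed
 * yet and the caller should treat the request as in progress.
 */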
static struct sha512_hash_ctx
		*sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
				       struct sha512_hash_ctx *ctx,
				       const void *buffer,
				       uint32_t len,
				       int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha512_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently being
	 * processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from the user buffer into
		 * the extra block.
		 */
		uint32_t copy_len = SHA512_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[
					ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *) buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here.
		 */
		assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha512_hash_ctx *)
				sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha512_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
{
	struct sha512_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha512_hash_ctx *)
					sha512_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha512_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned.  Otherwise, all jobs currently being managed
		 * by the sha512_ctx_mgr still need processing.  Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha512_mb_init(struct ahash_request *areq)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA512_H0;
	sctx->job.result_digest[1] = SHA512_H1;
	sctx->job.result_digest[2] = SHA512_H2;
	sctx->job.result_digest[3] = SHA512_H3;
	sctx->job.result_digest[4] = SHA512_H4;
	sctx->job.result_digest[5] = SHA512_H5;
	sctx->job.result_digest[6] = SHA512_H6;
	sctx->job.result_digest[7] = SHA512_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be64 *dst = (__be64 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be64(sctx->job.result_digest[i]);

	return 0;
}

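/*
 * Continue walking the scatterlist of a request whose current chunk has
 * been consumed, submitting each further chunk to the job manager.  When
 * the walk finishes and the request carried HASH_FINAL, the digest is
 * copied out.  On return, *ret_rctx is the request that actually
 * finished (possibly a different one than was passed in), or NULL if
 * everything is still in flight.
 */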
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha512_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha512_hash_ctx *)
						ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx) {
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		} else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha512_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled()) {
		rctx->complete(&req->base, err);
	} else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled()) {
				req_ctx->complete(&req->base, ret);
			} else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

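/*
 * Tag an incoming request with its arrival time and an expiry one
 * FLUSH_INTERVAL in the future, queue it on the per-cpu work list, and
 * make sure the flusher is armed to pick it up if the lanes never fill.
 */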
static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			       struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

static int sha512_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
					nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha512_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	sha512_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
					nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha512_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
				     areq);
	struct mcryptd_alg_cstate *cstate =
			this_cpu_ptr(sha512_mb_alg_state.alg_cstate);

	struct sha512_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha512_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
					HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

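/*
 * The entire sha512_hash_ctx is the exported state (statesize below is
 * sizeof(struct sha512_hash_ctx)), so export/import are plain copies.
 */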
static int sha512_mb_export(struct ahash_request *areq, void *out)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha512_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha512_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha512_hash_ctx));

	return 0;
}

static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha512_mb_areq_alg = {
	.init = sha512_mb_init,
	.update = sha512_mb_update,
	.final = sha512_mb_final,
	.finup = sha512_mb_finup,
	.export = sha512_mb_export,
	.import = sha512_mb_import,
	.halg = {
		.digestsize = SHA512_DIGEST_SIZE,
		.statesize = sizeof(struct sha512_hash_ctx),
		.base = {
			.cra_name = "__sha512-mb",
			.cra_driver_name = "__intel_sha512-mb",
			.cra_priority = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_INTERNAL,
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_module = THIS_MODULE,
			.cra_list = LIST_HEAD_INIT
				(sha512_mb_areq_alg.halg.base.cra_list),
			.cra_init = sha512_mb_areq_init_tfm,
			.cra_exit = sha512_mb_areq_exit_tfm,
			.cra_ctxsize = sizeof(struct sha512_hash_ctx),
		}
	}
};

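/*
 * The outer "sha512" algorithm is a thin shim: each operation copies the
 * caller's request into the request context, retargets it at the inner
 * mcryptd tfm allocated in sha512_mb_async_init_tfm(), and forwards the
 * call.  Completion then happens asynchronously through mcryptd.
 */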
static int sha512_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha512_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha512_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha512_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha512_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha512_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha512_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);

	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha512_mb_async_alg = {
	.init = sha512_mb_async_init,
	.update = sha512_mb_async_update,
	.final = sha512_mb_async_final,
	.finup = sha512_mb_async_finup,
	.digest = sha512_mb_async_digest,
	.export = sha512_mb_async_export,
	.import = sha512_mb_async_import,
	.halg = {
		.digestsize = SHA512_DIGEST_SIZE,
		.statesize = sizeof(struct sha512_hash_ctx),
		.base = {
			.cra_name = "sha512",
			.cra_driver_name = "sha512_mb",
			.cra_priority = 200,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_type = &crypto_ahash_type,
			.cra_module = THIS_MODULE,
			.cra_list = LIST_HEAD_INIT
				(sha512_mb_async_alg.halg.base.cra_list),
			.cra_init = sha512_mb_async_init_tfm,
			.cra_exit = sha512_mb_async_exit_tfm,
			.cra_ctxsize = sizeof(struct sha512_mb_ctx),
			.cra_alignmask = 0,
		},
	},
};

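/*
 * Per-cpu flusher, run from mcryptd's delayed work: while the oldest
 * request on the work list has passed its expiry, force the job manager
 * to flush and complete whatever comes out.  Re-arms itself for the next
 * pending expiry and returns that time (0 if the list emptied).
 */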
static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha512_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha512_hash_ctx *)
					sha512_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha512_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha512_mb_alg_state.alg_cstate =
				alloc_percpu(struct mcryptd_alg_cstate);

	sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
	sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
	sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
	sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;

	if (!sha512_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha512_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha512_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha512_mb_alg_state.flusher = &sha512_mb_flusher;

	err = crypto_register_ahash(&sha512_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha512_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha512_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha512_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha512_mb_async_alg);
	crypto_unregister_ahash(&sha512_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha512_mb_alg_state.alg_cstate);
}

module_init(sha512_mb_mod_init);
module_exit(sha512_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha512");