arch/x86/crypto/sha-mb/sha1_mb.c
/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *      Tim Chen <tim.c.chen@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <asm/xcr.h>
#include <asm/fpu/xsave.h>
#include <linux/hardirq.h>
#include <asm/fpu/internal.h>
#include "sha_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
        struct mcryptd_ahash *mcryptd_tfm;
};

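/*
 * The sha1_hash_ctx lives in the __ctx area of a shash_desc, and that
 * shash_desc is embedded in a mcryptd_hash_request_ctx, which in turn sits
 * in the __ctx area of an ahash_request.  The two helpers below walk back up
 * this layering with container_of() so a completed hash context can be
 * mapped to the request that owns it.
 */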
static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
        struct shash_desc *desc;

        desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
        return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}

static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
        return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
                         struct shash_desc *desc)
{
        rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
                                                          struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);

inline void sha1_init_digest(uint32_t *digest)
{
        static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
                        SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
        memcpy(digest, initial_digest, sizeof(initial_digest));
}

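/*
 * Apply the standard SHA-1 padding to the bytes left over in the extra
 * block buffer: a 0x80 byte, zeroes up to the final 8 bytes of a block,
 * and the total message length in bits as a 64-bit big-endian value.
 * Returns the number of extra blocks (1 or 2) that still need to be hashed.
 */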
inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
                         uint32_t total_len)
{
        uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

        memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
        padblock[i] = 0x80;

        i += ((SHA1_BLOCK_SIZE - 1) &
              (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
             + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
        *((uint64_t *) &padblock[i - 16]) = 0;
#endif

        *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

        /* Number of extra blocks to hash */
        return i >> SHA1_LOG2_BLOCK_SIZE;
}

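/*
 * Drive a hash context forward through the job manager until it either
 * completes, goes idle waiting for more user data, or is left in flight
 * behind other lanes (in which case NULL is returned and the context will
 * surface later via get_comp_job or flush).
 */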
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
{
        while (ctx) {
                if (ctx->status & HASH_CTX_STS_COMPLETE) {
                        /* Clear PROCESSING bit */
                        ctx->status = HASH_CTX_STS_COMPLETE;
                        return ctx;
                }

                /*
                 * If the extra blocks are empty, begin hashing what remains
                 * in the user's buffer.
                 */
                if (ctx->partial_block_buffer_length == 0 &&
                    ctx->incoming_buffer_length) {

                        const void *buffer = ctx->incoming_buffer;
                        uint32_t len = ctx->incoming_buffer_length;
                        uint32_t copy_len;

                        /*
                         * Only entire blocks can be hashed.
                         * Copy remainder to extra blocks buffer.
                         */
                        copy_len = len & (SHA1_BLOCK_SIZE-1);

                        if (copy_len) {
                                len -= copy_len;
                                memcpy(ctx->partial_block_buffer,
                                       ((const char *) buffer + len),
                                       copy_len);
                                ctx->partial_block_buffer_length = copy_len;
                        }

                        ctx->incoming_buffer_length = 0;

                        /* len should be a multiple of the block size now */
                        assert((len % SHA1_BLOCK_SIZE) == 0);

                        /* Set len to the number of blocks to be hashed */
                        len >>= SHA1_LOG2_BLOCK_SIZE;

                        if (len) {

                                ctx->job.buffer = (uint8_t *) buffer;
                                ctx->job.len = len;
                                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
                                                                                   &ctx->job);
                                continue;
                        }
                }

                /*
                 * If the extra blocks are not empty, then we are
                 * either on the last block(s) or we need more
                 * user input before continuing.
                 */
                if (ctx->status & HASH_CTX_STS_LAST) {

                        uint8_t *buf = ctx->partial_block_buffer;
                        uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

                        ctx->status = (HASH_CTX_STS_PROCESSING |
                                       HASH_CTX_STS_COMPLETE);
                        ctx->job.buffer = buf;
                        ctx->job.len = (uint32_t) n_extra_blocks;
                        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
                        continue;
                }

                ctx->status = HASH_CTX_STS_IDLE;
                return ctx;
        }

        return NULL;
}

static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
        /*
         * If get_comp_job returns NULL, there are no jobs complete.
         * If get_comp_job returns a job, verify that it is safe to return to the user.
         * If it is not ready, resubmit the job to finish processing.
         * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
         * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing.
         */
        struct sha1_hash_ctx *ctx;

        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
        return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
        sha1_job_mgr_init(&mgr->mgr);
}

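/*
 * Submit a buffer to the multi-buffer engine.  Whole blocks go straight to
 * the job manager; any trailing partial block is staged in the context's
 * extra block buffer until enough data arrives (or HASH_LAST pads it out).
 * A NULL return means the job was accepted but no lane has completed yet.
 */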
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
                                                 struct sha1_hash_ctx *ctx,
                                                 const void *buffer,
                                                 uint32_t len,
                                                 int flags)
{
        if (flags & (~HASH_ENTIRE)) {
                /* User should not pass anything other than FIRST, UPDATE, or LAST */
                ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
                return ctx;
        }

        if (ctx->status & HASH_CTX_STS_PROCESSING) {
                /* Cannot submit to a currently processing job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
                return ctx;
        }

        if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
                /* Cannot update a finished job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
                return ctx;
        }


        if (flags & HASH_FIRST) {
                /* Init digest */
                sha1_init_digest(ctx->job.result_digest);

                /* Reset byte counter */
                ctx->total_length = 0;

                /* Clear extra blocks */
                ctx->partial_block_buffer_length = 0;
        }

        /* If we made it here, there were no errors during this call to submit */
        ctx->error = HASH_CTX_ERROR_NONE;

        /* Store buffer ptr info from user */
        ctx->incoming_buffer = buffer;
        ctx->incoming_buffer_length = len;

        /* Store the user's request flags and mark this ctx as currently being processed. */
        ctx->status = (flags & HASH_LAST) ?
                        (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
                        HASH_CTX_STS_PROCESSING;

        /* Advance byte counter */
        ctx->total_length += len;

        /*
         * If there is anything currently buffered in the extra blocks,
         * append to it until it contains a whole block.
         * Or if the user's buffer contains less than a whole block,
         * append as much as possible to the extra block.
         */
        if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
                /* Compute how many bytes to copy from user buffer into extra block */
                uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;
                if (len < copy_len)
                        copy_len = len;

                if (copy_len) {
                        /* Copy and update relevant pointers and counters */
                        memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
                               buffer, copy_len);

                        ctx->partial_block_buffer_length += copy_len;
                        ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
                        ctx->incoming_buffer_length = len - copy_len;
                }

                /* The extra block should never contain more than 1 block here */
                assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

                /* If the extra block buffer contains exactly 1 block, it can be hashed. */
                if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
                        ctx->partial_block_buffer_length = 0;

                        ctx->job.buffer = ctx->partial_block_buffer;
                        ctx->job.len = 1;
                        ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
                }
        }

        return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
        struct sha1_hash_ctx *ctx;

        while (1) {
                ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

                /* If flush returned 0, there are no more jobs in flight. */
                if (!ctx)
                        return NULL;

                /*
                 * If flush returned a job, resubmit the job to finish processing.
                 */
                ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

                /*
                 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
                 * Otherwise, all jobs currently being managed by the sha1_ctx_mgr
                 * still need processing. Loop.
                 */
                if (ctx)
                        return ctx;
        }
}

static int sha1_mb_init(struct shash_desc *desc)
{
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        hash_ctx_init(sctx);
        sctx->job.result_digest[0] = SHA1_H0;
        sctx->job.result_digest[1] = SHA1_H1;
        sctx->job.result_digest[2] = SHA1_H2;
        sctx->job.result_digest[3] = SHA1_H3;
        sctx->job.result_digest[4] = SHA1_H4;
        sctx->total_length = 0;
        sctx->partial_block_buffer_length = 0;
        sctx->status = HASH_CTX_STS_IDLE;

        return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
        int i;
        struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
        __be32 *dst = (__be32 *) rctx->out;

        for (i = 0; i < 5; ++i)
                dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

        return 0;
}

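/*
 * Walk any remaining scatterlist segments of a request and feed them to the
 * context manager, one segment at a time, inside kernel_fpu_begin()/end()
 * sections.  With flush == true a lingering job is forced out of the job
 * manager so the walk can make progress.  On HASH_FINAL the digest is copied
 * out once the walk is done.
 */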
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
                           struct mcryptd_alg_cstate *cstate, bool flush)
{
        int flag = HASH_UPDATE;
        int nbytes, err = 0;
        struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
        struct sha1_hash_ctx *sha_ctx;

        /* more work ? */
        while (!(rctx->flag & HASH_DONE)) {
                nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
                if (nbytes < 0) {
                        err = nbytes;
                        goto out;
                }
                /* check if the walk is done */
                if (crypto_ahash_walk_last(&rctx->walk)) {
                        rctx->flag |= HASH_DONE;
                        if (rctx->flag & HASH_FINAL)
                                flag |= HASH_LAST;

                }
                sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
                kernel_fpu_begin();
                sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
                if (!sha_ctx) {
                        if (flush)
                                sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
                }
                kernel_fpu_end();
                if (sha_ctx)
                        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                else {
                        rctx = NULL;
                        goto out;
                }
        }

        /* copy the results */
        if (rctx->flag & HASH_FINAL)
                sha1_mb_set_results(rctx);

out:
        *ret_rctx = rctx;
        return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
                            struct mcryptd_alg_cstate *cstate,
                            int err)
{
        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        struct mcryptd_hash_request_ctx *req_ctx;
        int ret;

        /* remove from work list */
        spin_lock(&cstate->work_lock);
        list_del(&rctx->waiter);
        spin_unlock(&cstate->work_lock);

        if (irqs_disabled())
                rctx->complete(&req->base, err);
        else {
                local_bh_disable();
                rctx->complete(&req->base, err);
                local_bh_enable();
        }

        /* check to see if there are other jobs that are done */
        sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
        while (sha_ctx) {
                req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                ret = sha_finish_walk(&req_ctx, cstate, false);
                if (req_ctx) {
                        spin_lock(&cstate->work_lock);
                        list_del(&req_ctx->waiter);
                        spin_unlock(&cstate->work_lock);

                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
                                req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
                                req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
                sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
        }

        return 0;
}

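/*
 * Tag a request with its arrival time and sequence number, queue it on the
 * per-cpu work list, and arm the flusher so a lane that never fills up is
 * still flushed within FLUSH_INTERVAL microseconds.
 */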
static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
                             struct mcryptd_alg_cstate *cstate)
{
        unsigned long next_flush;
        unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

        /* initialize tag */
        rctx->tag.arrival = jiffies;    /* tag the arrival time */
        rctx->tag.seq_num = cstate->next_seq_num++;
        next_flush = rctx->tag.arrival + delay;
        rctx->tag.expire = next_flush;

        spin_lock(&cstate->work_lock);
        list_add_tail(&rctx->waiter, &cstate->work_list);
        spin_unlock(&cstate->work_lock);

        mcryptd_arm_flusher(cstate, delay);
}

static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        int ret = 0, nbytes;


        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, desc);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk))
                rctx->flag |= HASH_DONE;

        /* submit */
        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        sha1_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);

        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha1_hash_ctx *sha_ctx;
        int ret = 0, flag = HASH_UPDATE, nbytes;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, desc);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);

        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk)) {
                rctx->flag |= HASH_DONE;
                flag = HASH_LAST;
        }
        rctx->out = out;

        /* submit */
        rctx->flag |= HASH_FINAL;
        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        sha1_mb_add_list(rctx, cstate);

        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(desc, struct mcryptd_hash_request_ctx, desc);
        struct mcryptd_alg_cstate *cstate =
                this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

        struct sha1_hash_ctx *sha_ctx;
        int ret = 0;
        u8 data;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, desc);

        rctx->out = out;
        rctx->flag |= HASH_DONE | HASH_FINAL;

        sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
        /* flag HASH_FINAL and 0 data size */
        sha1_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha1_mb_export(struct shash_desc *desc, void *out)
{
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int sha1_mb_import(struct shash_desc *desc, const void *in)
{
        struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}


static struct shash_alg sha1_mb_shash_alg = {
        .digestsize = SHA1_DIGEST_SIZE,
        .init       = sha1_mb_init,
        .update     = sha1_mb_update,
        .final      = sha1_mb_final,
        .finup      = sha1_mb_finup,
        .export     = sha1_mb_export,
        .import     = sha1_mb_import,
        .descsize   = sizeof(struct sha1_hash_ctx),
        .statesize  = sizeof(struct sha1_hash_ctx),
        .base       = {
                .cra_name        = "__sha1-mb",
                .cra_driver_name = "__intel_sha1-mb",
                .cra_priority    = 100,
                /*
                 * use ASYNC flag as some buffers in multi-buffer
                 * algo may not have completed before hashing thread sleep
                 */
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
                                   CRYPTO_ALG_INTERNAL,
                .cra_blocksize   = SHA1_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_list        = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
        }
};

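/*
 * The exported "sha1" ahash below does no hashing itself: each operation
 * copies the caller's request into its request context area and redirects it
 * to the internal mcryptd tfm, which queues it for the per-cpu multi-buffer
 * engine.
 */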
static int sha1_mb_async_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);

        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_ahash *mcryptd_tfm;
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mcryptd_hash_ctx *mctx;

        mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
                                          CRYPTO_ALG_INTERNAL,
                                          CRYPTO_ALG_INTERNAL);
        if (IS_ERR(mcryptd_tfm))
                return PTR_ERR(mcryptd_tfm);
        mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
        mctx->alg_state = &sha1_mb_alg_state;
        ctx->mcryptd_tfm = mcryptd_tfm;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ahash_request) +
                                 crypto_ahash_reqsize(&mcryptd_tfm->base));

        return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
        struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_async_alg = {
        .init   = sha1_mb_async_init,
        .update = sha1_mb_async_update,
        .final  = sha1_mb_async_final,
        .finup  = sha1_mb_async_finup,
        .digest = sha1_mb_async_digest,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .base = {
                        .cra_name        = "sha1",
                        .cra_driver_name = "sha1_mb",
                        .cra_priority    = 200,
                        .cra_flags       = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
                        .cra_blocksize   = SHA1_BLOCK_SIZE,
                        .cra_type        = &crypto_ahash_type,
                        .cra_module      = THIS_MODULE,
                        .cra_list        = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
                        .cra_init        = sha1_mb_async_init_tfm,
                        .cra_exit        = sha1_mb_async_exit_tfm,
                        .cra_ctxsize     = sizeof(struct sha1_mb_ctx),
                        .cra_alignmask   = 0,
                },
        },
};

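/*
 * Per-cpu flusher callback.  Any request whose expire time has passed is
 * pushed through sha1_ctx_mgr_flush() and completed; if requests remain on
 * the work list, the flusher is re-armed for the earliest remaining expiry.
 */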
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
        struct mcryptd_hash_request_ctx *rctx;
        unsigned long cur_time;
        unsigned long next_flush = 0;
        struct sha1_hash_ctx *sha_ctx;


        cur_time = jiffies;

        while (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                if (time_before(cur_time, rctx->tag.expire))
                        break;
                kernel_fpu_begin();
                sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
                kernel_fpu_end();
                if (!sha_ctx) {
                        pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
                        break;
                }
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                sha_finish_walk(&rctx, cstate, true);
                sha_complete_job(rctx, cstate, 0);
        }

        if (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                /* get the hash context and then flush time */
                next_flush = rctx->tag.expire;
                mcryptd_arm_flusher(cstate, get_delay(next_flush));
        }
        return next_flush;
}

static int __init sha1_mb_mod_init(void)
{

        int cpu;
        int err;
        struct mcryptd_alg_cstate *cpu_state;

        /* check for dependent cpu features */
        if (!boot_cpu_has(X86_FEATURE_AVX2) ||
            !boot_cpu_has(X86_FEATURE_BMI2))
                return -ENODEV;

        /* initialize multibuffer structures */
        sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

        sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
        sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
        sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
        sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

        if (!sha1_mb_alg_state.alg_cstate)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                cpu_state->next_flush = 0;
                cpu_state->next_seq_num = 0;
                cpu_state->flusher_engaged = false;
                INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
                cpu_state->cpu = cpu;
                cpu_state->alg_state = &sha1_mb_alg_state;
                cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL);
                if (!cpu_state->mgr)
                        goto err2;
                sha1_ctx_mgr_init(cpu_state->mgr);
                INIT_LIST_HEAD(&cpu_state->work_list);
                spin_lock_init(&cpu_state->work_lock);
        }
        sha1_mb_alg_state.flusher = &sha1_mb_flusher;

        err = crypto_register_shash(&sha1_mb_shash_alg);
        if (err)
                goto err2;
        err = crypto_register_ahash(&sha1_mb_async_alg);
        if (err)
                goto err1;


        return 0;
err1:
        crypto_unregister_shash(&sha1_mb_shash_alg);
err2:
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha1_mb_alg_state.alg_cstate);
        return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
        int cpu;
        struct mcryptd_alg_cstate *cpu_state;

        crypto_unregister_ahash(&sha1_mb_async_alg);
        crypto_unregister_shash(&sha1_mb_shash_alg);
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");