/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *      Tim Chen <tim.c.chen@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */
#include <asm/fpu/xsave.h>
#include <linux/hardirq.h>
#include <asm/fpu/internal.h>
#include "sha_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
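
/*
 * Multi-buffer hashing batches several independent SHA1 jobs into the
 * SIMD lanes of a single AVX2 computation.  A job submitted when the
 * lanes cannot all be filled may sit parked in the job manager; the
 * per-cpu flusher, armed with FLUSH_INTERVAL above, forces such jobs
 * out so a lightly loaded system still sees bounded latency.
 */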
static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

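/*
 * The cast helpers below recover the enclosing request structures from
 * an embedded sha1_hash_ctx.  They rely on the fixed layout used here:
 * the sha1_hash_ctx lives in the __ctx area of a shash_desc, which is
 * the 'desc' member of a mcryptd_hash_request_ctx, which in turn sits
 * in the __ctx area of an ahash_request.
 */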
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct shash_desc *desc;

	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct shash_desc *desc)
{
	rctx->flag = HASH_UPDATE;
}

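/*
 * The job-manager entry points are implemented in assembly.  They are
 * kept as function pointers and bound to the AVX2 routines
 * (sha1_mb_mgr_*_avx2) in sha1_mb_mod_init() once the required CPU
 * features have been verified.
 */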
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
							  struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);

inline void sha1_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
				SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };

	memcpy(digest, initial_digest, sizeof(initial_digest));
}

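/*
 * Pad the final blocks per FIPS 180: a 0x80 byte after the data, zero
 * fill, then the total message length in bits as a big-endian 64-bit
 * value in the last 8 bytes.  padblock is two blocks long because the
 * length field may not fit in the same block as the 0x80 terminator.
 */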
inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
			 uint32_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}

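/*
 * Walk a context through the job manager until it either completes or
 * parks waiting for more data: hash any whole blocks still sitting in
 * the user's buffer, stash the remainder in the extra-block buffer,
 * and, for a LAST submission, generate and hash the padding blocks.
 */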
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						   struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
										   &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user.  If it is not ready, resubmit the job to finish
	 * processing.  If sha1_ctx_mgr_resubmit returned a job, it is ready
	 * to be returned.  Otherwise, all jobs currently being managed by
	 * the hash_ctx_mgr still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

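/*
 * Queue one FIRST/UPDATE/LAST chunk for a context.  The returned
 * pointer is whichever context (possibly another caller's) the job
 * manager reports as complete, or NULL while everything is still in
 * flight.
 */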
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
					  struct sha1_hash_ctx *ctx,
					  const void *buffer,
					  uint32_t len,
					  int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/* User should not pass anything other than FIRST, UPDATE, or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there were no errors during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/* Store the user's request flags and mark this ctx as currently being processed. */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
		/* Compute how many bytes to copy from user buffer into extra block */
		uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block here */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/* If the extra block buffer contains exactly 1 block, it can be hashed. */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

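/*
 * Force completion when no more submissions are coming: keep flushing
 * the job manager until it hands back a finished context or runs dry.
 */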
static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
		 * returned.  Otherwise, all jobs currently being managed by
		 * the sha1_ctx_mgr still need processing.  Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct shash_desc *desc)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					      rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
			if (!sha_ctx) {
				/* nothing completed; request stays in flight */
				kernel_fpu_end();
				rctx = NULL;
				goto out;
			}
		}
		kernel_fpu_end();

		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				rctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				rctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	rctx->tag.arrival = jiffies;    /* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

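/*
 * The shash entry points below run on the mcryptd workqueue, on the CPU
 * that owns this request's per-cpu manager (hence the cpu-clash sanity
 * check).  They return -EINPROGRESS when the job is left parked in a
 * SIMD lane; completion is then reported later via rctx->complete.
 */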
static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}
	rctx->out = out;

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	rctx->out = out;
	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
				      HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct shash_desc *desc, void *out)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct shash_desc *desc, const void *in)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg sha1_mb_shash_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_mb_init,
	.update		=	sha1_mb_update,
	.final		=	sha1_mb_final,
	.finup		=	sha1_mb_finup,
	.export		=	sha1_mb_export,
	.import		=	sha1_mb_import,
	.descsize	=	sizeof(struct sha1_hash_ctx),
	.statesize	=	sizeof(struct sha1_hash_ctx),
	.base		=	{
		.cra_name	 = "__sha1-mb",
		.cra_driver_name = "__intel_sha1-mb",
		.cra_priority	 = 100,
		/*
		 * use ASYNC flag as some buffers in multi-buffer
		 * algo may not have completed before hashing thread sleep
		 */
		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
				   CRYPTO_ALG_INTERNAL,
		.cra_blocksize	 = SHA1_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
	}
};

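/*
 * Outer ahash algorithm ("sha1") exposed to users.  Each operation is
 * copied into a nested request and forwarded to the internal
 * "__intel_sha1-mb" shash through mcryptd, which serializes jobs onto
 * the per-cpu multi-buffer managers.
 */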
static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_async_alg = {
	.init           = sha1_mb_async_init,
	.update         = sha1_mb_async_update,
	.final          = sha1_mb_async_final,
	.finup          = sha1_mb_async_finup,
	.digest         = sha1_mb_async_digest,
	.halg = {
		.digestsize     = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name               = "sha1",
			.cra_driver_name        = "sha1_mb",
			.cra_priority           = 200,
			.cra_flags              = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize          = SHA1_BLOCK_SIZE,
			.cra_type               = &crypto_ahash_type,
			.cra_module             = THIS_MODULE,
			.cra_list               = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init               = sha1_mb_async_init_tfm,
			.cra_exit               = sha1_mb_async_exit_tfm,
			.cra_ctxsize            = sizeof(struct sha1_mb_ctx),
		},
	},
};

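/*
 * Per-cpu flusher callback, run from mcryptd's delayed work.  Any
 * queued request whose expire time has passed is forced through the
 * job manager and completed; the return value is the next expiry to
 * arm, or 0 if the work list is empty.
 */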
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_shash(&sha1_mb_shash_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_shash(&sha1_mb_shash_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_shash(&sha1_mb_shash_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

);
931 module_exit(sha1_mb_mod_fini
);
933 MODULE_LICENSE("GPL");
934 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
936 MODULE_ALIAS_CRYPTO("sha1");