]>
Commit | Line | Data |
---|---|---|
f876f440 MD |
1 | /* |
2 | * Multi buffer SHA256 algorithm Glue Code | |
3 | * | |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * Copyright(c) 2016 Intel Corporation. | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of version 2 of the GNU General Public License as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, but | |
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 | * General Public License for more details. | |
19 | * | |
20 | * Contact Information: | |
21 | * Megha Dey <megha.dey@linux.intel.com> | |
22 | * | |
23 | * BSD LICENSE | |
24 | * | |
25 | * Copyright(c) 2016 Intel Corporation. | |
26 | * | |
27 | * Redistribution and use in source and binary forms, with or without | |
28 | * modification, are permitted provided that the following conditions | |
29 | * are met: | |
30 | * | |
31 | * * Redistributions of source code must retain the above copyright | |
32 | * notice, this list of conditions and the following disclaimer. | |
33 | * * Redistributions in binary form must reproduce the above copyright | |
34 | * notice, this list of conditions and the following disclaimer in | |
35 | * the documentation and/or other materials provided with the | |
36 | * distribution. | |
37 | * * Neither the name of Intel Corporation nor the names of its | |
38 | * contributors may be used to endorse or promote products derived | |
39 | * from this software without specific prior written permission. | |
40 | * | |
41 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
42 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
43 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
44 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
45 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
46 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
47 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
48 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
49 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
50 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
51 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
52 | */ | |
53 | ||
54 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
55 | ||
56 | #include <crypto/internal/hash.h> | |
57 | #include <linux/init.h> | |
58 | #include <linux/module.h> | |
59 | #include <linux/mm.h> | |
60 | #include <linux/cryptohash.h> | |
61 | #include <linux/types.h> | |
62 | #include <linux/list.h> | |
63 | #include <crypto/scatterwalk.h> | |
64 | #include <crypto/sha.h> | |
65 | #include <crypto/mcryptd.h> | |
66 | #include <crypto/crypto_wq.h> | |
67 | #include <asm/byteorder.h> | |
68 | #include <linux/hardirq.h> | |
69 | #include <asm/fpu/api.h> | |
70 | #include "sha256_mb_ctx.h" | |
71 | ||
72 | #define FLUSH_INTERVAL 1000 /* in usec */ | |
73 | ||
74 | static struct mcryptd_alg_state sha256_mb_alg_state; | |
75 | ||
76 | struct sha256_mb_ctx { | |
77 | struct mcryptd_ahash *mcryptd_tfm; | |
78 | }; | |
79 | ||
80 | static inline struct mcryptd_hash_request_ctx | |
81 | *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx) | |
82 | { | |
83 | struct ahash_request *areq; | |
84 | ||
85 | areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); | |
86 | return container_of(areq, struct mcryptd_hash_request_ctx, areq); | |
87 | } | |
88 | ||
89 | static inline struct ahash_request | |
90 | *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) | |
91 | { | |
92 | return container_of((void *) ctx, struct ahash_request, __ctx); | |
93 | } | |
94 | ||
95 | static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, | |
96 | struct ahash_request *areq) | |
97 | { | |
98 | rctx->flag = HASH_UPDATE; | |
99 | } | |
100 | ||
101 | static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state); | |
102 | static asmlinkage struct job_sha256* (*sha256_job_mgr_submit) | |
103 | (struct sha256_mb_mgr *state, struct job_sha256 *job); | |
104 | static asmlinkage struct job_sha256* (*sha256_job_mgr_flush) | |
105 | (struct sha256_mb_mgr *state); | |
106 | static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job) | |
107 | (struct sha256_mb_mgr *state); | |
108 | ||
109 | inline void sha256_init_digest(uint32_t *digest) | |
110 | { | |
111 | static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = { | |
112 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | |
113 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}; | |
114 | memcpy(digest, initial_digest, sizeof(initial_digest)); | |
115 | } | |
116 | ||
117 | inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], | |
118 | uint32_t total_len) | |
119 | { | |
120 | uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1); | |
121 | ||
122 | memset(&padblock[i], 0, SHA256_BLOCK_SIZE); | |
123 | padblock[i] = 0x80; | |
124 | ||
125 | i += ((SHA256_BLOCK_SIZE - 1) & | |
126 | (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) | |
127 | + 1 + SHA256_PADLENGTHFIELD_SIZE; | |
128 | ||
129 | #if SHA256_PADLENGTHFIELD_SIZE == 16 | |
130 | *((uint64_t *) &padblock[i - 16]) = 0; | |
131 | #endif | |
132 | ||
133 | *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); | |
134 | ||
135 | /* Number of extra blocks to hash */ | |
136 | return i >> SHA256_LOG2_BLOCK_SIZE; | |
137 | } | |
138 | ||
139 | static struct sha256_hash_ctx | |
140 | *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr, | |
141 | struct sha256_hash_ctx *ctx) | |
142 | { | |
143 | while (ctx) { | |
144 | if (ctx->status & HASH_CTX_STS_COMPLETE) { | |
145 | /* Clear PROCESSING bit */ | |
146 | ctx->status = HASH_CTX_STS_COMPLETE; | |
147 | return ctx; | |
148 | } | |
149 | ||
150 | /* | |
151 | * If the extra blocks are empty, begin hashing what remains | |
152 | * in the user's buffer. | |
153 | */ | |
154 | if (ctx->partial_block_buffer_length == 0 && | |
155 | ctx->incoming_buffer_length) { | |
156 | ||
157 | const void *buffer = ctx->incoming_buffer; | |
158 | uint32_t len = ctx->incoming_buffer_length; | |
159 | uint32_t copy_len; | |
160 | ||
161 | /* | |
162 | * Only entire blocks can be hashed. | |
163 | * Copy remainder to extra blocks buffer. | |
164 | */ | |
165 | copy_len = len & (SHA256_BLOCK_SIZE-1); | |
166 | ||
167 | if (copy_len) { | |
168 | len -= copy_len; | |
169 | memcpy(ctx->partial_block_buffer, | |
170 | ((const char *) buffer + len), | |
171 | copy_len); | |
172 | ctx->partial_block_buffer_length = copy_len; | |
173 | } | |
174 | ||
175 | ctx->incoming_buffer_length = 0; | |
176 | ||
177 | /* len should be a multiple of the block size now */ | |
178 | assert((len % SHA256_BLOCK_SIZE) == 0); | |
179 | ||
180 | /* Set len to the number of blocks to be hashed */ | |
181 | len >>= SHA256_LOG2_BLOCK_SIZE; | |
182 | ||
183 | if (len) { | |
184 | ||
185 | ctx->job.buffer = (uint8_t *) buffer; | |
186 | ctx->job.len = len; | |
187 | ctx = (struct sha256_hash_ctx *) | |
188 | sha256_job_mgr_submit(&mgr->mgr, &ctx->job); | |
189 | continue; | |
190 | } | |
191 | } | |
192 | ||
193 | /* | |
194 | * If the extra blocks are not empty, then we are | |
195 | * either on the last block(s) or we need more | |
196 | * user input before continuing. | |
197 | */ | |
198 | if (ctx->status & HASH_CTX_STS_LAST) { | |
199 | ||
200 | uint8_t *buf = ctx->partial_block_buffer; | |
201 | uint32_t n_extra_blocks = | |
202 | sha256_pad(buf, ctx->total_length); | |
203 | ||
204 | ctx->status = (HASH_CTX_STS_PROCESSING | | |
205 | HASH_CTX_STS_COMPLETE); | |
206 | ctx->job.buffer = buf; | |
207 | ctx->job.len = (uint32_t) n_extra_blocks; | |
208 | ctx = (struct sha256_hash_ctx *) | |
209 | sha256_job_mgr_submit(&mgr->mgr, &ctx->job); | |
210 | continue; | |
211 | } | |
212 | ||
213 | ctx->status = HASH_CTX_STS_IDLE; | |
214 | return ctx; | |
215 | } | |
216 | ||
217 | return NULL; | |
218 | } | |
219 | ||
220 | static struct sha256_hash_ctx | |
221 | *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr) | |
222 | { | |
223 | /* | |
224 | * If get_comp_job returns NULL, there are no jobs complete. | |
225 | * If get_comp_job returns a job, verify that it is safe to return to | |
226 | * the user. If it is not ready, resubmit the job to finish processing. | |
227 | * If sha256_ctx_mgr_resubmit returned a job, it is ready to be | |
228 | * returned. Otherwise, all jobs currently being managed by the | |
229 | * hash_ctx_mgr still need processing. | |
230 | */ | |
231 | struct sha256_hash_ctx *ctx; | |
232 | ||
233 | ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr); | |
234 | return sha256_ctx_mgr_resubmit(mgr, ctx); | |
235 | } | |
236 | ||
237 | static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr) | |
238 | { | |
239 | sha256_job_mgr_init(&mgr->mgr); | |
240 | } | |
241 | ||
242 | static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr, | |
243 | struct sha256_hash_ctx *ctx, | |
244 | const void *buffer, | |
245 | uint32_t len, | |
246 | int flags) | |
247 | { | |
248 | if (flags & (~HASH_ENTIRE)) { | |
249 | /* User should not pass anything other than FIRST, UPDATE | |
250 | * or LAST | |
251 | */ | |
252 | ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; | |
253 | return ctx; | |
254 | } | |
255 | ||
256 | if (ctx->status & HASH_CTX_STS_PROCESSING) { | |
257 | /* Cannot submit to a currently processing job. */ | |
258 | ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; | |
259 | return ctx; | |
260 | } | |
261 | ||
262 | if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { | |
263 | /* Cannot update a finished job. */ | |
264 | ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; | |
265 | return ctx; | |
266 | } | |
267 | ||
268 | if (flags & HASH_FIRST) { | |
269 | /* Init digest */ | |
270 | sha256_init_digest(ctx->job.result_digest); | |
271 | ||
272 | /* Reset byte counter */ | |
273 | ctx->total_length = 0; | |
274 | ||
275 | /* Clear extra blocks */ | |
276 | ctx->partial_block_buffer_length = 0; | |
277 | } | |
278 | ||
279 | /* If we made it here, there was no error during this call to submit */ | |
280 | ctx->error = HASH_CTX_ERROR_NONE; | |
281 | ||
282 | /* Store buffer ptr info from user */ | |
283 | ctx->incoming_buffer = buffer; | |
284 | ctx->incoming_buffer_length = len; | |
285 | ||
eb9bc8e7 TC |
286 | /* |
287 | * Store the user's request flags and mark this ctx as currently | |
f876f440 MD |
288 | * being processed. |
289 | */ | |
290 | ctx->status = (flags & HASH_LAST) ? | |
291 | (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : | |
292 | HASH_CTX_STS_PROCESSING; | |
293 | ||
294 | /* Advance byte counter */ | |
295 | ctx->total_length += len; | |
296 | ||
297 | /* | |
298 | * If there is anything currently buffered in the extra blocks, | |
299 | * append to it until it contains a whole block. | |
300 | * Or if the user's buffer contains less than a whole block, | |
301 | * append as much as possible to the extra block. | |
302 | */ | |
eb9bc8e7 TC |
303 | if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) { |
304 | /* | |
305 | * Compute how many bytes to copy from user buffer into | |
f876f440 MD |
306 | * extra block |
307 | */ | |
308 | uint32_t copy_len = SHA256_BLOCK_SIZE - | |
309 | ctx->partial_block_buffer_length; | |
310 | if (len < copy_len) | |
311 | copy_len = len; | |
312 | ||
313 | if (copy_len) { | |
314 | /* Copy and update relevant pointers and counters */ | |
315 | memcpy( | |
316 | &ctx->partial_block_buffer[ctx->partial_block_buffer_length], | |
317 | buffer, copy_len); | |
318 | ||
319 | ctx->partial_block_buffer_length += copy_len; | |
320 | ctx->incoming_buffer = (const void *) | |
321 | ((const char *)buffer + copy_len); | |
322 | ctx->incoming_buffer_length = len - copy_len; | |
323 | } | |
324 | ||
325 | /* The extra block should never contain more than 1 block */ | |
326 | assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE); | |
327 | ||
eb9bc8e7 TC |
328 | /* |
329 | * If the extra block buffer contains exactly 1 block, | |
f876f440 MD |
330 | * it can be hashed. |
331 | */ | |
332 | if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) { | |
333 | ctx->partial_block_buffer_length = 0; | |
334 | ||
335 | ctx->job.buffer = ctx->partial_block_buffer; | |
336 | ctx->job.len = 1; | |
337 | ctx = (struct sha256_hash_ctx *) | |
338 | sha256_job_mgr_submit(&mgr->mgr, &ctx->job); | |
339 | } | |
340 | } | |
341 | ||
342 | return sha256_ctx_mgr_resubmit(mgr, ctx); | |
343 | } | |
344 | ||
345 | static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr) | |
346 | { | |
347 | struct sha256_hash_ctx *ctx; | |
348 | ||
349 | while (1) { | |
350 | ctx = (struct sha256_hash_ctx *) | |
351 | sha256_job_mgr_flush(&mgr->mgr); | |
352 | ||
353 | /* If flush returned 0, there are no more jobs in flight. */ | |
354 | if (!ctx) | |
355 | return NULL; | |
356 | ||
357 | /* | |
358 | * If flush returned a job, resubmit the job to finish | |
359 | * processing. | |
360 | */ | |
361 | ctx = sha256_ctx_mgr_resubmit(mgr, ctx); | |
362 | ||
363 | /* | |
364 | * If sha256_ctx_mgr_resubmit returned a job, it is ready to | |
365 | * be returned. Otherwise, all jobs currently being managed by | |
366 | * the sha256_ctx_mgr still need processing. Loop. | |
367 | */ | |
368 | if (ctx) | |
369 | return ctx; | |
370 | } | |
371 | } | |
372 | ||
373 | static int sha256_mb_init(struct ahash_request *areq) | |
374 | { | |
375 | struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); | |
376 | ||
377 | hash_ctx_init(sctx); | |
378 | sctx->job.result_digest[0] = SHA256_H0; | |
379 | sctx->job.result_digest[1] = SHA256_H1; | |
380 | sctx->job.result_digest[2] = SHA256_H2; | |
381 | sctx->job.result_digest[3] = SHA256_H3; | |
382 | sctx->job.result_digest[4] = SHA256_H4; | |
383 | sctx->job.result_digest[5] = SHA256_H5; | |
384 | sctx->job.result_digest[6] = SHA256_H6; | |
385 | sctx->job.result_digest[7] = SHA256_H7; | |
386 | sctx->total_length = 0; | |
387 | sctx->partial_block_buffer_length = 0; | |
388 | sctx->status = HASH_CTX_STS_IDLE; | |
389 | ||
390 | return 0; | |
391 | } | |
392 | ||
393 | static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx) | |
394 | { | |
395 | int i; | |
396 | struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); | |
397 | __be32 *dst = (__be32 *) rctx->out; | |
398 | ||
399 | for (i = 0; i < 8; ++i) | |
400 | dst[i] = cpu_to_be32(sctx->job.result_digest[i]); | |
401 | ||
402 | return 0; | |
403 | } | |
404 | ||
405 | static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, | |
406 | struct mcryptd_alg_cstate *cstate, bool flush) | |
407 | { | |
408 | int flag = HASH_UPDATE; | |
409 | int nbytes, err = 0; | |
410 | struct mcryptd_hash_request_ctx *rctx = *ret_rctx; | |
411 | struct sha256_hash_ctx *sha_ctx; | |
412 | ||
413 | /* more work ? */ | |
414 | while (!(rctx->flag & HASH_DONE)) { | |
415 | nbytes = crypto_ahash_walk_done(&rctx->walk, 0); | |
416 | if (nbytes < 0) { | |
417 | err = nbytes; | |
418 | goto out; | |
419 | } | |
420 | /* check if the walk is done */ | |
421 | if (crypto_ahash_walk_last(&rctx->walk)) { | |
422 | rctx->flag |= HASH_DONE; | |
423 | if (rctx->flag & HASH_FINAL) | |
424 | flag |= HASH_LAST; | |
425 | ||
426 | } | |
427 | sha_ctx = (struct sha256_hash_ctx *) | |
428 | ahash_request_ctx(&rctx->areq); | |
429 | kernel_fpu_begin(); | |
430 | sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, | |
431 | rctx->walk.data, nbytes, flag); | |
432 | if (!sha_ctx) { | |
433 | if (flush) | |
434 | sha_ctx = sha256_ctx_mgr_flush(cstate->mgr); | |
435 | } | |
436 | kernel_fpu_end(); | |
437 | if (sha_ctx) | |
438 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
439 | else { | |
440 | rctx = NULL; | |
441 | goto out; | |
442 | } | |
443 | } | |
444 | ||
445 | /* copy the results */ | |
446 | if (rctx->flag & HASH_FINAL) | |
447 | sha256_mb_set_results(rctx); | |
448 | ||
449 | out: | |
450 | *ret_rctx = rctx; | |
451 | return err; | |
452 | } | |
453 | ||
454 | static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, | |
455 | struct mcryptd_alg_cstate *cstate, | |
456 | int err) | |
457 | { | |
458 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
459 | struct sha256_hash_ctx *sha_ctx; | |
460 | struct mcryptd_hash_request_ctx *req_ctx; | |
461 | int ret; | |
462 | ||
463 | /* remove from work list */ | |
464 | spin_lock(&cstate->work_lock); | |
465 | list_del(&rctx->waiter); | |
466 | spin_unlock(&cstate->work_lock); | |
467 | ||
468 | if (irqs_disabled()) | |
469 | rctx->complete(&req->base, err); | |
470 | else { | |
471 | local_bh_disable(); | |
472 | rctx->complete(&req->base, err); | |
473 | local_bh_enable(); | |
474 | } | |
475 | ||
476 | /* check to see if there are other jobs that are done */ | |
477 | sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); | |
478 | while (sha_ctx) { | |
479 | req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
480 | ret = sha_finish_walk(&req_ctx, cstate, false); | |
481 | if (req_ctx) { | |
482 | spin_lock(&cstate->work_lock); | |
483 | list_del(&req_ctx->waiter); | |
484 | spin_unlock(&cstate->work_lock); | |
485 | ||
486 | req = cast_mcryptd_ctx_to_req(req_ctx); | |
487 | if (irqs_disabled()) | |
172b1d6b | 488 | req_ctx->complete(&req->base, ret); |
f876f440 MD |
489 | else { |
490 | local_bh_disable(); | |
172b1d6b | 491 | req_ctx->complete(&req->base, ret); |
f876f440 MD |
492 | local_bh_enable(); |
493 | } | |
494 | } | |
495 | sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); | |
496 | } | |
497 | ||
498 | return 0; | |
499 | } | |
500 | ||
501 | static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx, | |
502 | struct mcryptd_alg_cstate *cstate) | |
503 | { | |
504 | unsigned long next_flush; | |
505 | unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); | |
506 | ||
507 | /* initialize tag */ | |
508 | rctx->tag.arrival = jiffies; /* tag the arrival time */ | |
509 | rctx->tag.seq_num = cstate->next_seq_num++; | |
510 | next_flush = rctx->tag.arrival + delay; | |
511 | rctx->tag.expire = next_flush; | |
512 | ||
513 | spin_lock(&cstate->work_lock); | |
514 | list_add_tail(&rctx->waiter, &cstate->work_list); | |
515 | spin_unlock(&cstate->work_lock); | |
516 | ||
517 | mcryptd_arm_flusher(cstate, delay); | |
518 | } | |
519 | ||
520 | static int sha256_mb_update(struct ahash_request *areq) | |
521 | { | |
522 | struct mcryptd_hash_request_ctx *rctx = | |
523 | container_of(areq, struct mcryptd_hash_request_ctx, areq); | |
524 | struct mcryptd_alg_cstate *cstate = | |
525 | this_cpu_ptr(sha256_mb_alg_state.alg_cstate); | |
526 | ||
527 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
528 | struct sha256_hash_ctx *sha_ctx; | |
529 | int ret = 0, nbytes; | |
530 | ||
531 | /* sanity check */ | |
532 | if (rctx->tag.cpu != smp_processor_id()) { | |
533 | pr_err("mcryptd error: cpu clash\n"); | |
534 | goto done; | |
535 | } | |
536 | ||
537 | /* need to init context */ | |
538 | req_ctx_init(rctx, areq); | |
539 | ||
540 | nbytes = crypto_ahash_walk_first(req, &rctx->walk); | |
541 | ||
542 | if (nbytes < 0) { | |
543 | ret = nbytes; | |
544 | goto done; | |
545 | } | |
546 | ||
547 | if (crypto_ahash_walk_last(&rctx->walk)) | |
548 | rctx->flag |= HASH_DONE; | |
549 | ||
550 | /* submit */ | |
551 | sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); | |
552 | sha256_mb_add_list(rctx, cstate); | |
553 | kernel_fpu_begin(); | |
554 | sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, | |
555 | nbytes, HASH_UPDATE); | |
556 | kernel_fpu_end(); | |
557 | ||
558 | /* check if anything is returned */ | |
559 | if (!sha_ctx) | |
560 | return -EINPROGRESS; | |
561 | ||
562 | if (sha_ctx->error) { | |
563 | ret = sha_ctx->error; | |
564 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
565 | goto done; | |
566 | } | |
567 | ||
568 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
569 | ret = sha_finish_walk(&rctx, cstate, false); | |
570 | ||
571 | if (!rctx) | |
572 | return -EINPROGRESS; | |
573 | done: | |
574 | sha_complete_job(rctx, cstate, ret); | |
575 | return ret; | |
576 | } | |
577 | ||
578 | static int sha256_mb_finup(struct ahash_request *areq) | |
579 | { | |
580 | struct mcryptd_hash_request_ctx *rctx = | |
581 | container_of(areq, struct mcryptd_hash_request_ctx, areq); | |
582 | struct mcryptd_alg_cstate *cstate = | |
583 | this_cpu_ptr(sha256_mb_alg_state.alg_cstate); | |
584 | ||
585 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
586 | struct sha256_hash_ctx *sha_ctx; | |
587 | int ret = 0, flag = HASH_UPDATE, nbytes; | |
588 | ||
589 | /* sanity check */ | |
590 | if (rctx->tag.cpu != smp_processor_id()) { | |
591 | pr_err("mcryptd error: cpu clash\n"); | |
592 | goto done; | |
593 | } | |
594 | ||
595 | /* need to init context */ | |
596 | req_ctx_init(rctx, areq); | |
597 | ||
598 | nbytes = crypto_ahash_walk_first(req, &rctx->walk); | |
599 | ||
600 | if (nbytes < 0) { | |
601 | ret = nbytes; | |
602 | goto done; | |
603 | } | |
604 | ||
605 | if (crypto_ahash_walk_last(&rctx->walk)) { | |
606 | rctx->flag |= HASH_DONE; | |
607 | flag = HASH_LAST; | |
608 | } | |
609 | ||
610 | /* submit */ | |
611 | rctx->flag |= HASH_FINAL; | |
612 | sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); | |
613 | sha256_mb_add_list(rctx, cstate); | |
614 | ||
615 | kernel_fpu_begin(); | |
616 | sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, | |
617 | nbytes, flag); | |
618 | kernel_fpu_end(); | |
619 | ||
620 | /* check if anything is returned */ | |
621 | if (!sha_ctx) | |
622 | return -EINPROGRESS; | |
623 | ||
624 | if (sha_ctx->error) { | |
625 | ret = sha_ctx->error; | |
626 | goto done; | |
627 | } | |
628 | ||
629 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
630 | ret = sha_finish_walk(&rctx, cstate, false); | |
631 | if (!rctx) | |
632 | return -EINPROGRESS; | |
633 | done: | |
634 | sha_complete_job(rctx, cstate, ret); | |
635 | return ret; | |
636 | } | |
637 | ||
638 | static int sha256_mb_final(struct ahash_request *areq) | |
639 | { | |
640 | struct mcryptd_hash_request_ctx *rctx = | |
641 | container_of(areq, struct mcryptd_hash_request_ctx, | |
642 | areq); | |
643 | struct mcryptd_alg_cstate *cstate = | |
644 | this_cpu_ptr(sha256_mb_alg_state.alg_cstate); | |
645 | ||
646 | struct sha256_hash_ctx *sha_ctx; | |
647 | int ret = 0; | |
648 | u8 data; | |
649 | ||
650 | /* sanity check */ | |
651 | if (rctx->tag.cpu != smp_processor_id()) { | |
652 | pr_err("mcryptd error: cpu clash\n"); | |
653 | goto done; | |
654 | } | |
655 | ||
656 | /* need to init context */ | |
657 | req_ctx_init(rctx, areq); | |
658 | ||
659 | rctx->flag |= HASH_DONE | HASH_FINAL; | |
660 | ||
661 | sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); | |
662 | /* flag HASH_FINAL and 0 data size */ | |
663 | sha256_mb_add_list(rctx, cstate); | |
664 | kernel_fpu_begin(); | |
665 | sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, | |
666 | HASH_LAST); | |
667 | kernel_fpu_end(); | |
668 | ||
669 | /* check if anything is returned */ | |
670 | if (!sha_ctx) | |
671 | return -EINPROGRESS; | |
672 | ||
673 | if (sha_ctx->error) { | |
674 | ret = sha_ctx->error; | |
675 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
676 | goto done; | |
677 | } | |
678 | ||
679 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
680 | ret = sha_finish_walk(&rctx, cstate, false); | |
681 | if (!rctx) | |
682 | return -EINPROGRESS; | |
683 | done: | |
684 | sha_complete_job(rctx, cstate, ret); | |
685 | return ret; | |
686 | } | |
687 | ||
688 | static int sha256_mb_export(struct ahash_request *areq, void *out) | |
689 | { | |
690 | struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); | |
691 | ||
692 | memcpy(out, sctx, sizeof(*sctx)); | |
693 | ||
694 | return 0; | |
695 | } | |
696 | ||
697 | static int sha256_mb_import(struct ahash_request *areq, const void *in) | |
698 | { | |
699 | struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); | |
700 | ||
701 | memcpy(sctx, in, sizeof(*sctx)); | |
702 | ||
703 | return 0; | |
704 | } | |
705 | ||
706 | static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm) | |
707 | { | |
708 | struct mcryptd_ahash *mcryptd_tfm; | |
709 | struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
710 | struct mcryptd_hash_ctx *mctx; | |
711 | ||
712 | mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb", | |
713 | CRYPTO_ALG_INTERNAL, | |
714 | CRYPTO_ALG_INTERNAL); | |
715 | if (IS_ERR(mcryptd_tfm)) | |
716 | return PTR_ERR(mcryptd_tfm); | |
717 | mctx = crypto_ahash_ctx(&mcryptd_tfm->base); | |
718 | mctx->alg_state = &sha256_mb_alg_state; | |
719 | ctx->mcryptd_tfm = mcryptd_tfm; | |
720 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
721 | sizeof(struct ahash_request) + | |
722 | crypto_ahash_reqsize(&mcryptd_tfm->base)); | |
723 | ||
724 | return 0; | |
725 | } | |
726 | ||
727 | static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm) | |
728 | { | |
729 | struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
730 | ||
731 | mcryptd_free_ahash(ctx->mcryptd_tfm); | |
732 | } | |
733 | ||
734 | static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm) | |
735 | { | |
736 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
737 | sizeof(struct ahash_request) + | |
738 | sizeof(struct sha256_hash_ctx)); | |
739 | ||
740 | return 0; | |
741 | } | |
742 | ||
743 | static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm) | |
744 | { | |
745 | struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
746 | ||
747 | mcryptd_free_ahash(ctx->mcryptd_tfm); | |
748 | } | |
749 | ||
750 | static struct ahash_alg sha256_mb_areq_alg = { | |
751 | .init = sha256_mb_init, | |
752 | .update = sha256_mb_update, | |
753 | .final = sha256_mb_final, | |
754 | .finup = sha256_mb_finup, | |
755 | .export = sha256_mb_export, | |
756 | .import = sha256_mb_import, | |
757 | .halg = { | |
758 | .digestsize = SHA256_DIGEST_SIZE, | |
759 | .statesize = sizeof(struct sha256_hash_ctx), | |
760 | .base = { | |
761 | .cra_name = "__sha256-mb", | |
762 | .cra_driver_name = "__intel_sha256-mb", | |
763 | .cra_priority = 100, | |
764 | /* | |
765 | * use ASYNC flag as some buffers in multi-buffer | |
766 | * algo may not have completed before hashing thread | |
767 | * sleep | |
768 | */ | |
769 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | |
770 | CRYPTO_ALG_ASYNC | | |
771 | CRYPTO_ALG_INTERNAL, | |
772 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
773 | .cra_module = THIS_MODULE, | |
774 | .cra_list = LIST_HEAD_INIT | |
775 | (sha256_mb_areq_alg.halg.base.cra_list), | |
776 | .cra_init = sha256_mb_areq_init_tfm, | |
777 | .cra_exit = sha256_mb_areq_exit_tfm, | |
778 | .cra_ctxsize = sizeof(struct sha256_hash_ctx), | |
779 | } | |
780 | } | |
781 | }; | |
782 | ||
783 | static int sha256_mb_async_init(struct ahash_request *req) | |
784 | { | |
785 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
786 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
787 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
788 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
789 | ||
790 | memcpy(mcryptd_req, req, sizeof(*req)); | |
791 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
792 | return crypto_ahash_init(mcryptd_req); | |
793 | } | |
794 | ||
795 | static int sha256_mb_async_update(struct ahash_request *req) | |
796 | { | |
797 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
798 | ||
799 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
800 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
801 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
802 | ||
803 | memcpy(mcryptd_req, req, sizeof(*req)); | |
804 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
805 | return crypto_ahash_update(mcryptd_req); | |
806 | } | |
807 | ||
808 | static int sha256_mb_async_finup(struct ahash_request *req) | |
809 | { | |
810 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
811 | ||
812 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
813 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
814 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
815 | ||
816 | memcpy(mcryptd_req, req, sizeof(*req)); | |
817 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
818 | return crypto_ahash_finup(mcryptd_req); | |
819 | } | |
820 | ||
821 | static int sha256_mb_async_final(struct ahash_request *req) | |
822 | { | |
823 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
824 | ||
825 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
826 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
827 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
828 | ||
829 | memcpy(mcryptd_req, req, sizeof(*req)); | |
830 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
831 | return crypto_ahash_final(mcryptd_req); | |
832 | } | |
833 | ||
834 | static int sha256_mb_async_digest(struct ahash_request *req) | |
835 | { | |
836 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
837 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
838 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
839 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
840 | ||
841 | memcpy(mcryptd_req, req, sizeof(*req)); | |
842 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
843 | return crypto_ahash_digest(mcryptd_req); | |
844 | } | |
845 | ||
846 | static int sha256_mb_async_export(struct ahash_request *req, void *out) | |
847 | { | |
848 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
849 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
850 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
851 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
852 | ||
853 | memcpy(mcryptd_req, req, sizeof(*req)); | |
854 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
855 | return crypto_ahash_export(mcryptd_req, out); | |
856 | } | |
857 | ||
858 | static int sha256_mb_async_import(struct ahash_request *req, const void *in) | |
859 | { | |
860 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
861 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
862 | struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
863 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
864 | struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); | |
865 | struct mcryptd_hash_request_ctx *rctx; | |
866 | struct ahash_request *areq; | |
867 | ||
868 | memcpy(mcryptd_req, req, sizeof(*req)); | |
869 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
870 | rctx = ahash_request_ctx(mcryptd_req); | |
871 | areq = &rctx->areq; | |
872 | ||
873 | ahash_request_set_tfm(areq, child); | |
874 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, | |
875 | rctx->complete, req); | |
876 | ||
877 | return crypto_ahash_import(mcryptd_req, in); | |
878 | } | |
879 | ||
880 | static struct ahash_alg sha256_mb_async_alg = { | |
881 | .init = sha256_mb_async_init, | |
882 | .update = sha256_mb_async_update, | |
883 | .final = sha256_mb_async_final, | |
884 | .finup = sha256_mb_async_finup, | |
885 | .export = sha256_mb_async_export, | |
886 | .import = sha256_mb_async_import, | |
887 | .digest = sha256_mb_async_digest, | |
888 | .halg = { | |
889 | .digestsize = SHA256_DIGEST_SIZE, | |
890 | .statesize = sizeof(struct sha256_hash_ctx), | |
891 | .base = { | |
892 | .cra_name = "sha256", | |
893 | .cra_driver_name = "sha256_mb", | |
894 | .cra_priority = 200, | |
895 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | |
896 | CRYPTO_ALG_ASYNC, | |
897 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
898 | .cra_type = &crypto_ahash_type, | |
899 | .cra_module = THIS_MODULE, | |
900 | .cra_list = LIST_HEAD_INIT | |
901 | (sha256_mb_async_alg.halg.base.cra_list), | |
902 | .cra_init = sha256_mb_async_init_tfm, | |
903 | .cra_exit = sha256_mb_async_exit_tfm, | |
904 | .cra_ctxsize = sizeof(struct sha256_mb_ctx), | |
905 | .cra_alignmask = 0, | |
906 | }, | |
907 | }, | |
908 | }; | |
909 | ||
910 | static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate) | |
911 | { | |
912 | struct mcryptd_hash_request_ctx *rctx; | |
913 | unsigned long cur_time; | |
914 | unsigned long next_flush = 0; | |
915 | struct sha256_hash_ctx *sha_ctx; | |
916 | ||
917 | ||
918 | cur_time = jiffies; | |
919 | ||
920 | while (!list_empty(&cstate->work_list)) { | |
921 | rctx = list_entry(cstate->work_list.next, | |
922 | struct mcryptd_hash_request_ctx, waiter); | |
923 | if (time_before(cur_time, rctx->tag.expire)) | |
924 | break; | |
925 | kernel_fpu_begin(); | |
926 | sha_ctx = (struct sha256_hash_ctx *) | |
927 | sha256_ctx_mgr_flush(cstate->mgr); | |
928 | kernel_fpu_end(); | |
929 | if (!sha_ctx) { | |
930 | pr_err("sha256_mb error: nothing got" | |
931 | " flushed for non-empty list\n"); | |
932 | break; | |
933 | } | |
934 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
935 | sha_finish_walk(&rctx, cstate, true); | |
936 | sha_complete_job(rctx, cstate, 0); | |
937 | } | |
938 | ||
939 | if (!list_empty(&cstate->work_list)) { | |
940 | rctx = list_entry(cstate->work_list.next, | |
941 | struct mcryptd_hash_request_ctx, waiter); | |
942 | /* get the hash context and then flush time */ | |
943 | next_flush = rctx->tag.expire; | |
944 | mcryptd_arm_flusher(cstate, get_delay(next_flush)); | |
945 | } | |
946 | return next_flush; | |
947 | } | |
948 | ||
949 | static int __init sha256_mb_mod_init(void) | |
950 | { | |
951 | ||
952 | int cpu; | |
953 | int err; | |
954 | struct mcryptd_alg_cstate *cpu_state; | |
955 | ||
956 | /* check for dependent cpu features */ | |
957 | if (!boot_cpu_has(X86_FEATURE_AVX2) || | |
958 | !boot_cpu_has(X86_FEATURE_BMI2)) | |
959 | return -ENODEV; | |
960 | ||
961 | /* initialize multibuffer structures */ | |
962 | sha256_mb_alg_state.alg_cstate = alloc_percpu | |
963 | (struct mcryptd_alg_cstate); | |
964 | ||
965 | sha256_job_mgr_init = sha256_mb_mgr_init_avx2; | |
966 | sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2; | |
967 | sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2; | |
968 | sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2; | |
969 | ||
970 | if (!sha256_mb_alg_state.alg_cstate) | |
971 | return -ENOMEM; | |
972 | for_each_possible_cpu(cpu) { | |
973 | cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); | |
974 | cpu_state->next_flush = 0; | |
975 | cpu_state->next_seq_num = 0; | |
976 | cpu_state->flusher_engaged = false; | |
977 | INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); | |
978 | cpu_state->cpu = cpu; | |
979 | cpu_state->alg_state = &sha256_mb_alg_state; | |
980 | cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr), | |
981 | GFP_KERNEL); | |
982 | if (!cpu_state->mgr) | |
983 | goto err2; | |
984 | sha256_ctx_mgr_init(cpu_state->mgr); | |
985 | INIT_LIST_HEAD(&cpu_state->work_list); | |
986 | spin_lock_init(&cpu_state->work_lock); | |
987 | } | |
988 | sha256_mb_alg_state.flusher = &sha256_mb_flusher; | |
989 | ||
990 | err = crypto_register_ahash(&sha256_mb_areq_alg); | |
991 | if (err) | |
992 | goto err2; | |
993 | err = crypto_register_ahash(&sha256_mb_async_alg); | |
994 | if (err) | |
995 | goto err1; | |
996 | ||
997 | ||
998 | return 0; | |
999 | err1: | |
1000 | crypto_unregister_ahash(&sha256_mb_areq_alg); | |
1001 | err2: | |
1002 | for_each_possible_cpu(cpu) { | |
1003 | cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); | |
1004 | kfree(cpu_state->mgr); | |
1005 | } | |
1006 | free_percpu(sha256_mb_alg_state.alg_cstate); | |
1007 | return -ENODEV; | |
1008 | } | |
1009 | ||
1010 | static void __exit sha256_mb_mod_fini(void) | |
1011 | { | |
1012 | int cpu; | |
1013 | struct mcryptd_alg_cstate *cpu_state; | |
1014 | ||
1015 | crypto_unregister_ahash(&sha256_mb_async_alg); | |
1016 | crypto_unregister_ahash(&sha256_mb_areq_alg); | |
1017 | for_each_possible_cpu(cpu) { | |
1018 | cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); | |
1019 | kfree(cpu_state->mgr); | |
1020 | } | |
1021 | free_percpu(sha256_mb_alg_state.alg_cstate); | |
1022 | } | |
1023 | ||
1024 | module_init(sha256_mb_mod_init); | |
1025 | module_exit(sha256_mb_mod_fini); | |
1026 | ||
1027 | MODULE_LICENSE("GPL"); | |
1028 | MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated"); | |
1029 | ||
1030 | MODULE_ALIAS_CRYPTO("sha256"); |