]>
Commit | Line | Data |
---|---|---|
ad61e042 TC |
1 | /* |
2 | * Multi buffer SHA1 algorithm Glue Code | |
3 | * | |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | |
5 | * redistributing this file, you may do so under either license. | |
6 | * | |
7 | * GPL LICENSE SUMMARY | |
8 | * | |
9 | * Copyright(c) 2014 Intel Corporation. | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of version 2 of the GNU General Public License as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | * This program is distributed in the hope that it will be useful, but | |
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
18 | * General Public License for more details. | |
19 | * | |
20 | * Contact Information: | |
21 | * Tim Chen <tim.c.chen@linux.intel.com> | |
22 | * | |
23 | * BSD LICENSE | |
24 | * | |
25 | * Copyright(c) 2014 Intel Corporation. | |
26 | * | |
27 | * Redistribution and use in source and binary forms, with or without | |
28 | * modification, are permitted provided that the following conditions | |
29 | * are met: | |
30 | * | |
31 | * * Redistributions of source code must retain the above copyright | |
32 | * notice, this list of conditions and the following disclaimer. | |
33 | * * Redistributions in binary form must reproduce the above copyright | |
34 | * notice, this list of conditions and the following disclaimer in | |
35 | * the documentation and/or other materials provided with the | |
36 | * distribution. | |
37 | * * Neither the name of Intel Corporation nor the names of its | |
38 | * contributors may be used to endorse or promote products derived | |
39 | * from this software without specific prior written permission. | |
40 | * | |
41 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
42 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
43 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
44 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
45 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
46 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
47 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
48 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
49 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
50 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
51 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
52 | */ | |
53 | ||
54 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
55 | ||
56 | #include <crypto/internal/hash.h> | |
57 | #include <linux/init.h> | |
58 | #include <linux/module.h> | |
59 | #include <linux/mm.h> | |
60 | #include <linux/cryptohash.h> | |
61 | #include <linux/types.h> | |
62 | #include <linux/list.h> | |
63 | #include <crypto/scatterwalk.h> | |
64 | #include <crypto/sha.h> | |
65 | #include <crypto/mcryptd.h> | |
66 | #include <crypto/crypto_wq.h> | |
67 | #include <asm/byteorder.h> | |
68 | #include <asm/i387.h> | |
69 | #include <asm/xcr.h> | |
70 | #include <asm/xsave.h> | |
71 | #include <linux/hardirq.h> | |
72 | #include <asm/fpu-internal.h> | |
73 | #include "sha_mb_ctx.h" | |
74 | ||
75 | #define FLUSH_INTERVAL 1000 /* in usec */ | |
76 | ||
4c1948fc | 77 | static struct mcryptd_alg_state sha1_mb_alg_state; |
ad61e042 TC |
78 | |
/* Per-tfm context of the outer async algorithm: holds the inner mcryptd tfm. */
struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};
82 | ||
83 | static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) | |
84 | { | |
85 | struct shash_desc *desc; | |
86 | ||
87 | desc = container_of((void *) hash_ctx, struct shash_desc, __ctx); | |
88 | return container_of(desc, struct mcryptd_hash_request_ctx, desc); | |
89 | } | |
90 | ||
91 | static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) | |
92 | { | |
93 | return container_of((void *) ctx, struct ahash_request, __ctx); | |
94 | } | |
95 | ||
96 | static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, | |
97 | struct shash_desc *desc) | |
98 | { | |
99 | rctx->flag = HASH_UPDATE; | |
100 | } | |
101 | ||
4c1948fc FW |
102 | static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); |
103 | static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state, | |
ad61e042 | 104 | struct job_sha1 *job); |
4c1948fc FW |
105 | static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state); |
106 | static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state); | |
ad61e042 TC |
107 | |
108 | inline void sha1_init_digest(uint32_t *digest) | |
109 | { | |
110 | static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0, | |
111 | SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; | |
112 | memcpy(digest, initial_digest, sizeof(initial_digest)); | |
113 | } | |
114 | ||
/*
 * Append SHA-1 padding (0x80, zero fill, 64-bit big-endian bit count) to
 * the partial block in @padblock, which must be at least two blocks long.
 * @total_len is the total message length in bytes so far.
 * Returns the number of extra blocks (1 or 2) that still need hashing.
 */
inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
			 uint32_t total_len)
{
	/* i = number of message bytes already sitting in the last block */
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

	/*
	 * Advance i to the end of the block that will carry the length
	 * field, rounding up so 0x80 + length always fit.
	 */
	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	/* Message length is stored in bits, big-endian. */
	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}
136 | ||
/*
 * Drive @ctx forward until it either completes, goes idle waiting for more
 * user data, or is left queued inside the job manager (NULL returned).
 * Called after every submit/flush/get_comp_job to finish partial work.
 */
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {

				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				/* submit may return a different, completed job */
				ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
										   &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

			/* COMPLETE marks this as the final submission */
			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		/* No data left and not the last block: park the context. */
		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
213 | ||
4c1948fc | 214 | static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) |
ad61e042 TC |
215 | { |
216 | /* | |
217 | * If get_comp_job returns NULL, there are no jobs complete. | |
218 | * If get_comp_job returns a job, verify that it is safe to return to the user. | |
219 | * If it is not ready, resubmit the job to finish processing. | |
220 | * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. | |
221 | * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing. | |
222 | */ | |
223 | struct sha1_hash_ctx *ctx; | |
224 | ||
225 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); | |
226 | return sha1_ctx_mgr_resubmit(mgr, ctx); | |
227 | } | |
228 | ||
4c1948fc | 229 | static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr) |
ad61e042 TC |
230 | { |
231 | sha1_job_mgr_init(&mgr->mgr); | |
232 | } | |
233 | ||
/*
 * Submit @len bytes at @buffer to @ctx with HASH_FIRST/UPDATE/LAST @flags.
 * Whole blocks are handed to the job manager; any remainder is buffered in
 * the context's extra-block buffer. Returns the context when it is ready
 * for the caller (complete, idle, or errored), or NULL while in flight.
 */
static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
						 struct sha1_hash_ctx *ctx,
						 const void *buffer,
						 uint32_t len,
						 int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/* User should not pass anything other than FIRST, UPDATE, or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}


	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there were no errors during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/* Store the user's request flags and mark this ctx as currently being processed. */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
		/* Compute how many bytes to copy from user buffer into extra block */
		uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block here */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/* If the extra block buffer contains exactly 1 block, it can be hashed. */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	/* Finish whatever processing the submit started. */
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}
322 | ||
4c1948fc | 323 | static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) |
ad61e042 TC |
324 | { |
325 | struct sha1_hash_ctx *ctx; | |
326 | ||
327 | while (1) { | |
328 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); | |
329 | ||
330 | /* If flush returned 0, there are no more jobs in flight. */ | |
331 | if (!ctx) | |
332 | return NULL; | |
333 | ||
334 | /* | |
335 | * If flush returned a job, resubmit the job to finish processing. | |
336 | */ | |
337 | ctx = sha1_ctx_mgr_resubmit(mgr, ctx); | |
338 | ||
339 | /* | |
340 | * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. | |
341 | * Otherwise, all jobs currently being managed by the sha1_ctx_mgr | |
342 | * still need processing. Loop. | |
343 | */ | |
344 | if (ctx) | |
345 | return ctx; | |
346 | } | |
347 | } | |
348 | ||
349 | static int sha1_mb_init(struct shash_desc *desc) | |
350 | { | |
351 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
352 | ||
353 | hash_ctx_init(sctx); | |
354 | sctx->job.result_digest[0] = SHA1_H0; | |
355 | sctx->job.result_digest[1] = SHA1_H1; | |
356 | sctx->job.result_digest[2] = SHA1_H2; | |
357 | sctx->job.result_digest[3] = SHA1_H3; | |
358 | sctx->job.result_digest[4] = SHA1_H4; | |
359 | sctx->total_length = 0; | |
360 | sctx->partial_block_buffer_length = 0; | |
361 | sctx->status = HASH_CTX_STS_IDLE; | |
362 | ||
363 | return 0; | |
364 | } | |
365 | ||
366 | static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) | |
367 | { | |
368 | int i; | |
369 | struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); | |
370 | __be32 *dst = (__be32 *) rctx->out; | |
371 | ||
372 | for (i = 0; i < 5; ++i) | |
373 | dst[i] = cpu_to_be32(sctx->job.result_digest[i]); | |
374 | ||
375 | return 0; | |
376 | } | |
377 | ||
/*
 * Continue walking the request's scatterlist data and feeding it to the
 * context manager until the request finishes or goes back in flight.
 * If @flush is set, flush the manager whenever a submit returns nothing.
 * On return, *ret_rctx is the request that completed (results copied for
 * HASH_FINAL requests) or NULL if work is still pending.
 */
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;

		}
		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
		/* The AVX2 SHA-1 code needs FPU/SIMD context. */
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		/* A returned ctx may belong to a different request. */
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
424 | ||
425 | static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, | |
426 | struct mcryptd_alg_cstate *cstate, | |
427 | int err) | |
428 | { | |
429 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
430 | struct sha1_hash_ctx *sha_ctx; | |
431 | struct mcryptd_hash_request_ctx *req_ctx; | |
432 | int ret; | |
433 | ||
434 | /* remove from work list */ | |
435 | spin_lock(&cstate->work_lock); | |
436 | list_del(&rctx->waiter); | |
437 | spin_unlock(&cstate->work_lock); | |
438 | ||
439 | if (irqs_disabled()) | |
440 | rctx->complete(&req->base, err); | |
441 | else { | |
442 | local_bh_disable(); | |
443 | rctx->complete(&req->base, err); | |
444 | local_bh_enable(); | |
445 | } | |
446 | ||
447 | /* check to see if there are other jobs that are done */ | |
448 | sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); | |
449 | while (sha_ctx) { | |
450 | req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
451 | ret = sha_finish_walk(&req_ctx, cstate, false); | |
452 | if (req_ctx) { | |
453 | spin_lock(&cstate->work_lock); | |
454 | list_del(&req_ctx->waiter); | |
455 | spin_unlock(&cstate->work_lock); | |
456 | ||
457 | req = cast_mcryptd_ctx_to_req(req_ctx); | |
458 | if (irqs_disabled()) | |
459 | rctx->complete(&req->base, ret); | |
460 | else { | |
461 | local_bh_disable(); | |
462 | rctx->complete(&req->base, ret); | |
463 | local_bh_enable(); | |
464 | } | |
465 | } | |
466 | sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); | |
467 | } | |
468 | ||
469 | return 0; | |
470 | } | |
471 | ||
472 | static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, | |
473 | struct mcryptd_alg_cstate *cstate) | |
474 | { | |
475 | unsigned long next_flush; | |
476 | unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); | |
477 | ||
478 | /* initialize tag */ | |
479 | rctx->tag.arrival = jiffies; /* tag the arrival time */ | |
480 | rctx->tag.seq_num = cstate->next_seq_num++; | |
481 | next_flush = rctx->tag.arrival + delay; | |
482 | rctx->tag.expire = next_flush; | |
483 | ||
484 | spin_lock(&cstate->work_lock); | |
485 | list_add_tail(&rctx->waiter, &cstate->work_list); | |
486 | spin_unlock(&cstate->work_lock); | |
487 | ||
488 | mcryptd_arm_flusher(cstate, delay); | |
489 | } | |
490 | ||
/*
 * crypto_shash .update handler: walk the request's data and submit it to
 * the per-cpu multibuffer manager as a HASH_UPDATE job. Returns
 * -EINPROGRESS while the job stays in flight for later completion.
 * NOTE(review): @data/@len are unused; input comes via the ahash walk.
 */
static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;


	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);
	/* AVX2 code requires FPU context */
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	/* the returned ctx may belong to a different request */
	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
549 | ||
/*
 * crypto_shash .finup handler: hash the remaining data and finalize the
 * digest into @out. Returns -EINPROGRESS while the job is in flight.
 * NOTE(review): @data/@len are unused; input comes via the ahash walk.
 */
static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	/* last chunk: this submit also closes the hash */
	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}
	rctx->out = out;

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);

	/* AVX2 code requires FPU context */
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	/* the returned ctx may belong to a different request */
	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
610 | ||
/*
 * crypto_shash .final handler: submit an empty HASH_LAST job so the
 * manager pads and closes the digest, then copy the result to @out.
 * Returns -EINPROGRESS while the job is in flight.
 */
static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;	/* dummy buffer for the zero-length submit */

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	rctx->out = out;
	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	/* AVX2 code requires FPU context */
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	/* the returned ctx may belong to a different request */
	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}
659 | ||
660 | static int sha1_mb_export(struct shash_desc *desc, void *out) | |
661 | { | |
662 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
663 | ||
664 | memcpy(out, sctx, sizeof(*sctx)); | |
665 | ||
666 | return 0; | |
667 | } | |
668 | ||
669 | static int sha1_mb_import(struct shash_desc *desc, const void *in) | |
670 | { | |
671 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
672 | ||
673 | memcpy(sctx, in, sizeof(*sctx)); | |
674 | ||
675 | return 0; | |
676 | } | |
677 | ||
678 | ||
/* Inner synchronous shash ("__sha1-mb") run by mcryptd on per-cpu queues. */
static struct shash_alg sha1_mb_shash_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_mb_init,
	.update = sha1_mb_update,
	.final = sha1_mb_final,
	.finup = sha1_mb_finup,
	.export = sha1_mb_export,
	.import = sha1_mb_import,
	.descsize = sizeof(struct sha1_hash_ctx),
	.statesize = sizeof(struct sha1_hash_ctx),
	.base = {
		.cra_name = "__sha1-mb",
		.cra_driver_name = "__intel_sha1-mb",
		.cra_priority = 100,
		/*
		 * use ASYNC flag as some buffers in multi-buffer
		 * algo may not have completed before hashing thread sleep
		 */
		.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
		.cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
	}
};
703 | ||
704 | static int sha1_mb_async_init(struct ahash_request *req) | |
705 | { | |
706 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
707 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
708 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
709 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
710 | ||
711 | memcpy(mcryptd_req, req, sizeof(*req)); | |
712 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
713 | return crypto_ahash_init(mcryptd_req); | |
714 | } | |
715 | ||
716 | static int sha1_mb_async_update(struct ahash_request *req) | |
717 | { | |
718 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
719 | ||
720 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
721 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
722 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
723 | ||
724 | memcpy(mcryptd_req, req, sizeof(*req)); | |
725 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
726 | return crypto_ahash_update(mcryptd_req); | |
727 | } | |
728 | ||
729 | static int sha1_mb_async_finup(struct ahash_request *req) | |
730 | { | |
731 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
732 | ||
733 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
734 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
735 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
736 | ||
737 | memcpy(mcryptd_req, req, sizeof(*req)); | |
738 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
739 | return crypto_ahash_finup(mcryptd_req); | |
740 | } | |
741 | ||
742 | static int sha1_mb_async_final(struct ahash_request *req) | |
743 | { | |
744 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
745 | ||
746 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
747 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
748 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
749 | ||
750 | memcpy(mcryptd_req, req, sizeof(*req)); | |
751 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
752 | return crypto_ahash_final(mcryptd_req); | |
753 | } | |
754 | ||
4c1948fc | 755 | static int sha1_mb_async_digest(struct ahash_request *req) |
ad61e042 TC |
756 | { |
757 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
758 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
759 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
760 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
761 | ||
762 | memcpy(mcryptd_req, req, sizeof(*req)); | |
763 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
764 | return crypto_ahash_digest(mcryptd_req); | |
765 | } | |
766 | ||
767 | static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) | |
768 | { | |
769 | struct mcryptd_ahash *mcryptd_tfm; | |
770 | struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
771 | struct mcryptd_hash_ctx *mctx; | |
772 | ||
773 | mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0); | |
774 | if (IS_ERR(mcryptd_tfm)) | |
775 | return PTR_ERR(mcryptd_tfm); | |
776 | mctx = crypto_ahash_ctx(&mcryptd_tfm->base); | |
777 | mctx->alg_state = &sha1_mb_alg_state; | |
778 | ctx->mcryptd_tfm = mcryptd_tfm; | |
779 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
780 | sizeof(struct ahash_request) + | |
781 | crypto_ahash_reqsize(&mcryptd_tfm->base)); | |
782 | ||
783 | return 0; | |
784 | } | |
785 | ||
786 | static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) | |
787 | { | |
788 | struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
789 | ||
790 | mcryptd_free_ahash(ctx->mcryptd_tfm); | |
791 | } | |
792 | ||
/* Outer asynchronous ahash ("sha1") that proxies all ops to mcryptd. */
static struct ahash_alg sha1_mb_async_alg = {
	.init           = sha1_mb_async_init,
	.update         = sha1_mb_async_update,
	.final          = sha1_mb_async_final,
	.finup          = sha1_mb_async_finup,
	.digest         = sha1_mb_async_digest,
	.halg = {
		.digestsize     = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name               = "sha1",
			.cra_driver_name        = "sha1_mb",
			.cra_priority           = 200,
			.cra_flags              = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize          = SHA1_BLOCK_SIZE,
			.cra_type               = &crypto_ahash_type,
			.cra_module             = THIS_MODULE,
			.cra_list               = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init               = sha1_mb_async_init_tfm,
			.cra_exit               = sha1_mb_async_exit_tfm,
			.cra_ctxsize            = sizeof(struct sha1_mb_ctx),
			.cra_alignmask          = 0,
		},
	},
};
817 | ||
4c1948fc | 818 | static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate) |
ad61e042 TC |
819 | { |
820 | struct mcryptd_hash_request_ctx *rctx; | |
821 | unsigned long cur_time; | |
822 | unsigned long next_flush = 0; | |
823 | struct sha1_hash_ctx *sha_ctx; | |
824 | ||
825 | ||
826 | cur_time = jiffies; | |
827 | ||
828 | while (!list_empty(&cstate->work_list)) { | |
829 | rctx = list_entry(cstate->work_list.next, | |
830 | struct mcryptd_hash_request_ctx, waiter); | |
831 | if time_before(cur_time, rctx->tag.expire) | |
832 | break; | |
833 | kernel_fpu_begin(); | |
834 | sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr); | |
835 | kernel_fpu_end(); | |
836 | if (!sha_ctx) { | |
837 | pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); | |
838 | break; | |
839 | } | |
840 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
841 | sha_finish_walk(&rctx, cstate, true); | |
842 | sha_complete_job(rctx, cstate, 0); | |
843 | } | |
844 | ||
845 | if (!list_empty(&cstate->work_list)) { | |
846 | rctx = list_entry(cstate->work_list.next, | |
847 | struct mcryptd_hash_request_ctx, waiter); | |
848 | /* get the hash context and then flush time */ | |
849 | next_flush = rctx->tag.expire; | |
850 | mcryptd_arm_flusher(cstate, get_delay(next_flush)); | |
851 | } | |
852 | return next_flush; | |
853 | } | |
854 | ||
855 | static int __init sha1_mb_mod_init(void) | |
856 | { | |
857 | ||
858 | int cpu; | |
859 | int err; | |
860 | struct mcryptd_alg_cstate *cpu_state; | |
861 | ||
862 | /* check for dependent cpu features */ | |
863 | if (!boot_cpu_has(X86_FEATURE_AVX2) || | |
864 | !boot_cpu_has(X86_FEATURE_BMI2)) | |
865 | return -ENODEV; | |
866 | ||
867 | /* initialize multibuffer structures */ | |
868 | sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate); | |
869 | ||
870 | sha1_job_mgr_init = sha1_mb_mgr_init_avx2; | |
871 | sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2; | |
872 | sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2; | |
873 | sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2; | |
874 | ||
875 | if (!sha1_mb_alg_state.alg_cstate) | |
876 | return -ENOMEM; | |
877 | for_each_possible_cpu(cpu) { | |
878 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
879 | cpu_state->next_flush = 0; | |
880 | cpu_state->next_seq_num = 0; | |
881 | cpu_state->flusher_engaged = false; | |
882 | INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); | |
883 | cpu_state->cpu = cpu; | |
884 | cpu_state->alg_state = &sha1_mb_alg_state; | |
885 | cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL); | |
886 | if (!cpu_state->mgr) | |
887 | goto err2; | |
888 | sha1_ctx_mgr_init(cpu_state->mgr); | |
889 | INIT_LIST_HEAD(&cpu_state->work_list); | |
890 | spin_lock_init(&cpu_state->work_lock); | |
891 | } | |
892 | sha1_mb_alg_state.flusher = &sha1_mb_flusher; | |
893 | ||
894 | err = crypto_register_shash(&sha1_mb_shash_alg); | |
895 | if (err) | |
896 | goto err2; | |
897 | err = crypto_register_ahash(&sha1_mb_async_alg); | |
898 | if (err) | |
899 | goto err1; | |
900 | ||
901 | ||
902 | return 0; | |
903 | err1: | |
904 | crypto_unregister_shash(&sha1_mb_shash_alg); | |
905 | err2: | |
906 | for_each_possible_cpu(cpu) { | |
907 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
908 | kfree(cpu_state->mgr); | |
909 | } | |
910 | free_percpu(sha1_mb_alg_state.alg_cstate); | |
911 | return -ENODEV; | |
912 | } | |
913 | ||
914 | static void __exit sha1_mb_mod_fini(void) | |
915 | { | |
916 | int cpu; | |
917 | struct mcryptd_alg_cstate *cpu_state; | |
918 | ||
919 | crypto_unregister_ahash(&sha1_mb_async_alg); | |
920 | crypto_unregister_shash(&sha1_mb_shash_alg); | |
921 | for_each_possible_cpu(cpu) { | |
922 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
923 | kfree(cpu_state->mgr); | |
924 | } | |
925 | free_percpu(sha1_mb_alg_state.alg_cstate); | |
926 | } | |
927 | ||
928 | module_init(sha1_mb_mod_init); | |
929 | module_exit(sha1_mb_mod_fini); | |
930 | ||
931 | MODULE_LICENSE("GPL"); | |
932 | MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated"); | |
933 | ||
3e14dcf7 | 934 | MODULE_ALIAS_CRYPTO("sha1"); |