]>
Commit | Line | Data |
---|---|---|
1b44c5a6 AT |
1 | /* |
2 | * Copyright (C) 2017 Marvell | |
3 | * | |
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | |
5 | * | |
6 | * This file is licensed under the terms of the GNU General Public | |
7 | * License version 2. This program is licensed "as is" without any | |
8 | * warranty of any kind, whether express or implied. | |
9 | */ | |
10 | ||
aed3731e | 11 | #include <crypto/hmac.h> |
293f89cf | 12 | #include <crypto/md5.h> |
1b44c5a6 AT |
13 | #include <crypto/sha.h> |
14 | #include <linux/device.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | #include <linux/dmapool.h> | |
17 | ||
1b44c5a6 AT |
18 | #include "safexcel.h" |
19 | ||
/* Per-transform (tfm) context for ahash operations. */
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;	/* CONTEXT_CONTROL_CRYPTO_ALG_* selector for this tfm */

	/* Precomputed HMAC inner/outer pad digests, sized for the largest
	 * supported digest (SHA-512).
	 */
	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
29 | ||
/* Per-request state for ahash operations. */
struct safexcel_ahash_req {
	bool last_req;		/* final()/finup() was called on this request */
	bool finish;		/* emit the final digest when sending */
	bool hmac;		/* request belongs to an HMAC operation */
	bool needs_inv;		/* context record must be invalidated first */

	int nents;		/* mapped src scatterlist entries, 0 = unmapped */
	dma_addr_t result_dma;	/* DMA mapping of 'state' for the result */

	u32 digest;		/* CONTEXT_CONTROL_DIGEST_* type */

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	/* Byte counters: [0] is the low 64 bits, [1] counts wraps of [0].
	 * 'len' is what update() accepted, 'processed' what send() handled.
	 */
	u64 len[2];
	u64 processed[2];

	/* Data accepted but not yet sent to the engine. */
	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	/* Staging buffer for data to be cached for the next send() call. */
	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
53 | ||
b460edb6 AT |
54 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) |
55 | { | |
56 | if (req->len[1] > req->processed[1]) | |
57 | return 0xffffffff - (req->len[0] - req->processed[0]); | |
58 | ||
59 | return req->len[0] - req->processed[0]; | |
60 | } | |
61 | ||
/* Build the two-instruction engine token for a hash operation: hash
 * 'input_length' bytes of input, then insert 'result_length' bytes of
 * digest into the result stream.
 */
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
80 | ||
/* Fill in the control words of the first command descriptor: algorithm,
 * digest type, context size and, when resuming a partially hashed request,
 * the saved intermediate digest plus the hardware block counter.
 */
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;

	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed[0] || req->processed[1]) {
			/* Resuming: context size in 32-bit words is the
			 * algorithm's state size plus one word of counter.
			 */
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed[0] || req->processed[1]) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish) {
				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
					  req->processed[1]);

				/* This is a hardware limitation, as the
				 * counter must fit into an u32. This represents
				 * a fairly big amount of input data, so we
				 * shouldn't see this.
				 */
				if (unlikely(count & 0xffff0000)) {
					dev_warn(priv->dev,
						 "Input data is too big\n");
					return;
				}

				/* Counter word follows the digest words. */
				ctx->base.ctxr->data[i] = cpu_to_le32(count);
			}
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		/* HMAC: context record holds the precomputed ipad then opad
		 * intermediate states back to back.
		 */
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
150 | ||
/* Completion handler for a regular hash request: pop the result descriptor,
 * undo the DMA mappings, copy out the digest on a final request and keep any
 * leftover cached bytes for the next send() call.
 */
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	u64 cache_len;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	/* Release the DMA mappings made by send(); clear the handles so a
	 * later cleanup does not unmap twice.
	 */
	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	/* Promote the data staged in cache_next to the active cache. */
	cache_len = safexcel_queued_len(sreq);
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
203 | ||
/* Build and queue the command/result descriptors for a hash request:
 * optionally cache trailing bytes for a later call, map the cached data and
 * the source scatterlist for DMA, then emit one result descriptor. On error
 * every mapping and descriptor made so far is rolled back (goto cleanup).
 * Returns 0 on success with *commands/*results set, or a negative errno.
 */
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, extra, n_cdesc = 0, ret = 0;
	u64 queued, len, cache_len;

	queued = len = safexcel_queued_len(req);
	if (queued <= crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			/* Everything was cached: nothing to send yet. */
			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma))
			return -EINVAL;

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued < sglen)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz);

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);

	/* 128-bit running total of bytes handed to the engine. */
	req->processed[0] += len;
	if (req->processed[0] < len)
		req->processed[1]++;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_sz = 0;
	}

	return ret;
}
356 | ||
/* Return true when the hardware context record no longer matches the
 * request's saved state (digest words or block counter), meaning the
 * record must be invalidated before it can be reused.
 */
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	u64 processed;
	int i;

	/* Rebuild the block counter the same way send() programs it. */
	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
		return true;

	return false;
}
377 | ||
/* Completion handler for a context-invalidation request. When the tfm is
 * being torn down the context record is freed and the request completes;
 * otherwise the original request is re-queued on a freshly selected ring.
 */
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	/* The request completes later, after being processed again. */
	*should_complete = false;

	return 1;
}
427 | ||
1eb7b403 OH |
428 | static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
429 | struct crypto_async_request *async, | |
430 | bool *should_complete, int *ret) | |
431 | { | |
432 | struct ahash_request *areq = ahash_request_cast(async); | |
433 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
434 | int err; | |
435 | ||
53c83e91 | 436 | BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv); |
871df319 | 437 | |
1eb7b403 OH |
438 | if (req->needs_inv) { |
439 | req->needs_inv = false; | |
440 | err = safexcel_handle_inv_result(priv, ring, async, | |
441 | should_complete, ret); | |
442 | } else { | |
443 | err = safexcel_handle_req_result(priv, ring, async, | |
444 | should_complete, ret); | |
445 | } | |
446 | ||
447 | return err; | |
448 | } | |
449 | ||
1b44c5a6 | 450 | static int safexcel_ahash_send_inv(struct crypto_async_request *async, |
9744fec9 | 451 | int ring, int *commands, int *results) |
1b44c5a6 AT |
452 | { |
453 | struct ahash_request *areq = ahash_request_cast(async); | |
454 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
455 | int ret; | |
456 | ||
5290ad6e | 457 | ret = safexcel_invalidate_cache(async, ctx->priv, |
9744fec9 | 458 | ctx->base.ctxr_dma, ring); |
1b44c5a6 AT |
459 | if (unlikely(ret)) |
460 | return ret; | |
461 | ||
462 | *commands = 1; | |
463 | *results = 1; | |
464 | ||
465 | return 0; | |
466 | } | |
467 | ||
1eb7b403 | 468 | static int safexcel_ahash_send(struct crypto_async_request *async, |
9744fec9 | 469 | int ring, int *commands, int *results) |
1eb7b403 OH |
470 | { |
471 | struct ahash_request *areq = ahash_request_cast(async); | |
472 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
473 | int ret; | |
474 | ||
475 | if (req->needs_inv) | |
9744fec9 | 476 | ret = safexcel_ahash_send_inv(async, ring, commands, results); |
1eb7b403 | 477 | else |
9744fec9 OH |
478 | ret = safexcel_ahash_send_req(async, ring, commands, results); |
479 | ||
1eb7b403 OH |
480 | return ret; |
481 | } | |
482 | ||
/* Synchronously invalidate the tfm's hardware context record: build a
 * dummy on-stack request flagged needs_inv, queue it on the tfm's ring
 * and wait for its completion. Returns 0 or the completion error.
 */
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
521 | ||
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, aka. when there is at least 1 block size in the pipe.
 *
 * Returns the number of bytes cached, or -E2BIG when the new data would not
 * fit in the cache and must be sent to the engine instead.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	u64 queued, cache_len;

	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = safexcel_queued_len(req);
	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = queued - areq->nbytes;

	/*
	 * In case there isn't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}
555 | ||
/* Queue the request on its ring, allocating a context record on first use
 * and scheduling a context invalidation first when the record is stale.
 */
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
		    (req->processed[0] || req->processed[1]) &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		/* First request on this tfm: allocate its context record. */
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
600 | ||
/* update() entry point: account for the new data, cache it, and enqueue
 * the request once more than a block is pending (HMAC requests are
 * deferred entirely to final()).
 */
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	/* 128-bit total of bytes accepted from the caller. */
	req->len[0] += areq->nbytes;
	if (req->len[0] < areq->nbytes)
		req->len[1]++;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an hmac request.
	 * Everything will be handled by the final() call.
	 */
	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}
632 | ||
/* final() entry point: mark the request as finishing and enqueue it.
 * Zero-length hashes are answered directly with the well-known
 * empty-message digests without touching the hardware.
 */
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!req->len[0] && !req->len[1] && !areq->nbytes) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
			memcpy(areq->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
			memcpy(areq->result, sha384_zero_message_hash,
			       SHA384_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
			memcpy(areq->result, sha512_zero_message_hash,
			       SHA512_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}
667 | ||
668 | static int safexcel_ahash_finup(struct ahash_request *areq) | |
669 | { | |
670 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
671 | ||
672 | req->last_req = true; | |
673 | req->finish = true; | |
674 | ||
675 | safexcel_ahash_update(areq); | |
676 | return safexcel_ahash_final(areq); | |
677 | } | |
678 | ||
679 | static int safexcel_ahash_export(struct ahash_request *areq, void *out) | |
680 | { | |
681 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
682 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
683 | struct safexcel_ahash_export_state *export = out; | |
684 | ||
b460edb6 AT |
685 | export->len[0] = req->len[0]; |
686 | export->len[1] = req->len[1]; | |
687 | export->processed[0] = req->processed[0]; | |
688 | export->processed[1] = req->processed[1]; | |
1b44c5a6 | 689 | |
b869648c AT |
690 | export->digest = req->digest; |
691 | ||
1b44c5a6 | 692 | memcpy(export->state, req->state, req->state_sz); |
1b44c5a6 AT |
693 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); |
694 | ||
695 | return 0; | |
696 | } | |
697 | ||
698 | static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | |
699 | { | |
700 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
701 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
702 | const struct safexcel_ahash_export_state *export = in; | |
703 | int ret; | |
704 | ||
705 | ret = crypto_ahash_init(areq); | |
706 | if (ret) | |
707 | return ret; | |
708 | ||
b460edb6 AT |
709 | req->len[0] = export->len[0]; |
710 | req->len[1] = export->len[1]; | |
711 | req->processed[0] = export->processed[0]; | |
712 | req->processed[1] = export->processed[1]; | |
1b44c5a6 | 713 | |
b869648c AT |
714 | req->digest = export->digest; |
715 | ||
1b44c5a6 AT |
716 | memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); |
717 | memcpy(req->state, export->state, req->state_sz); | |
718 | ||
719 | return 0; | |
720 | } | |
721 | ||
/* tfm constructor: wire the context to its device and the driver's send /
 * result callbacks, and size the per-request context.
 */
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}
737 | ||
/* Initialize a SHA-1 request: reset the per-request state and load the
 * standard SHA-1 initial hash values.
 */
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}
757 | ||
/* One-shot SHA-1 digest: init then finup. */
static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	return ret ? ret : safexcel_ahash_finup(areq);
}
767 | ||
/* tfm destructor: release the hardware context record, invalidating it
 * first on engines with a transform record cache.
 */
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
787 | ||
/* Algorithm template registered with the crypto API for plain SHA-1. */
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
817 | ||
/* Initialize an HMAC-SHA1 request: SHA-1 init, then switch the digest
 * type so the precomputed ipad/opad context is used.
 */
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha1_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}
826 | ||
/* One-shot HMAC-SHA1 digest: init then finup. */
static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	return ret ? ret : safexcel_ahash_finup(areq);
}
836 | ||
/* Completion tracking for the synchronous fallback hashes run while
 * precomputing HMAC pads in the setkey path.
 */
struct safexcel_ahash_result {
	struct completion completion;
	int error;
};
841 | ||
842 | static void safexcel_ahash_complete(struct crypto_async_request *req, int error) | |
843 | { | |
844 | struct safexcel_ahash_result *result = req->data; | |
845 | ||
846 | if (error == -EINPROGRESS) | |
847 | return; | |
848 | ||
849 | result->error = error; | |
850 | complete(&result->completion); | |
851 | } | |
852 | ||
853 | static int safexcel_hmac_init_pad(struct ahash_request *areq, | |
854 | unsigned int blocksize, const u8 *key, | |
855 | unsigned int keylen, u8 *ipad, u8 *opad) | |
856 | { | |
857 | struct safexcel_ahash_result result; | |
858 | struct scatterlist sg; | |
859 | int ret, i; | |
860 | u8 *keydup; | |
861 | ||
862 | if (keylen <= blocksize) { | |
863 | memcpy(ipad, key, keylen); | |
864 | } else { | |
865 | keydup = kmemdup(key, keylen, GFP_KERNEL); | |
866 | if (!keydup) | |
867 | return -ENOMEM; | |
868 | ||
869 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | |
870 | safexcel_ahash_complete, &result); | |
871 | sg_init_one(&sg, keydup, keylen); | |
872 | ahash_request_set_crypt(areq, &sg, ipad, keylen); | |
873 | init_completion(&result.completion); | |
874 | ||
875 | ret = crypto_ahash_digest(areq); | |
4dc5475a | 876 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
1b44c5a6 AT |
877 | wait_for_completion_interruptible(&result.completion); |
878 | ret = result.error; | |
879 | } | |
880 | ||
881 | /* Avoid leaking */ | |
882 | memzero_explicit(keydup, keylen); | |
883 | kfree(keydup); | |
884 | ||
885 | if (ret) | |
886 | return ret; | |
887 | ||
888 | keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); | |
889 | } | |
890 | ||
891 | memset(ipad + keylen, 0, blocksize - keylen); | |
892 | memcpy(opad, ipad, blocksize); | |
893 | ||
894 | for (i = 0; i < blocksize; i++) { | |
aed3731e AT |
895 | ipad[i] ^= HMAC_IPAD_VALUE; |
896 | opad[i] ^= HMAC_OPAD_VALUE; | |
1b44c5a6 AT |
897 | } |
898 | ||
899 | return 0; | |
900 | } | |
901 | ||
902 | static int safexcel_hmac_init_iv(struct ahash_request *areq, | |
903 | unsigned int blocksize, u8 *pad, void *state) | |
904 | { | |
905 | struct safexcel_ahash_result result; | |
906 | struct safexcel_ahash_req *req; | |
907 | struct scatterlist sg; | |
908 | int ret; | |
909 | ||
910 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | |
911 | safexcel_ahash_complete, &result); | |
912 | sg_init_one(&sg, pad, blocksize); | |
913 | ahash_request_set_crypt(areq, &sg, pad, blocksize); | |
914 | init_completion(&result.completion); | |
915 | ||
916 | ret = crypto_ahash_init(areq); | |
917 | if (ret) | |
918 | return ret; | |
919 | ||
920 | req = ahash_request_ctx(areq); | |
921 | req->hmac = true; | |
922 | req->last_req = true; | |
923 | ||
924 | ret = crypto_ahash_update(areq); | |
12bf4142 | 925 | if (ret && ret != -EINPROGRESS && ret != -EBUSY) |
1b44c5a6 AT |
926 | return ret; |
927 | ||
928 | wait_for_completion_interruptible(&result.completion); | |
929 | if (result.error) | |
930 | return result.error; | |
931 | ||
932 | return crypto_ahash_export(areq, state); | |
933 | } | |
934 | ||
f6beaea3 AT |
935 | int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, |
936 | void *istate, void *ostate) | |
1b44c5a6 AT |
937 | { |
938 | struct ahash_request *areq; | |
939 | struct crypto_ahash *tfm; | |
940 | unsigned int blocksize; | |
941 | u8 *ipad, *opad; | |
942 | int ret; | |
943 | ||
944 | tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH, | |
945 | CRYPTO_ALG_TYPE_AHASH_MASK); | |
946 | if (IS_ERR(tfm)) | |
947 | return PTR_ERR(tfm); | |
948 | ||
949 | areq = ahash_request_alloc(tfm, GFP_KERNEL); | |
950 | if (!areq) { | |
951 | ret = -ENOMEM; | |
952 | goto free_ahash; | |
953 | } | |
954 | ||
955 | crypto_ahash_clear_flags(tfm, ~0); | |
956 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | |
957 | ||
6396bb22 | 958 | ipad = kcalloc(2, blocksize, GFP_KERNEL); |
1b44c5a6 AT |
959 | if (!ipad) { |
960 | ret = -ENOMEM; | |
961 | goto free_request; | |
962 | } | |
963 | ||
964 | opad = ipad + blocksize; | |
965 | ||
966 | ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad); | |
967 | if (ret) | |
968 | goto free_ipad; | |
969 | ||
970 | ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate); | |
971 | if (ret) | |
972 | goto free_ipad; | |
973 | ||
974 | ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate); | |
975 | ||
976 | free_ipad: | |
977 | kfree(ipad); | |
978 | free_request: | |
979 | ahash_request_free(areq); | |
980 | free_ahash: | |
981 | crypto_free_ahash(tfm); | |
982 | ||
983 | return ret; | |
984 | } | |
985 | ||
73f36ea7 AT |
/*
 * Common HMAC setkey: precompute the ipad/opad states via the base
 * hash @alg and cache them in the tfm context (@state_sz bytes each).
 */
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	/* If the engine caches context records, a pad change means the
	 * existing record must be invalidated before reuse.
	 */
	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
		for (i = 0; i < state_sz / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}
1014 | ||
73f36ea7 AT |
1015 | static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, |
1016 | unsigned int keylen) | |
1017 | { | |
1018 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1", | |
1019 | SHA1_DIGEST_SIZE); | |
1020 | } | |
1021 | ||
1b44c5a6 AT |
/* HMAC-SHA1 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
1052 | ||
1053 | static int safexcel_sha256_init(struct ahash_request *areq) | |
1054 | { | |
1055 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1056 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1057 | ||
1058 | memset(req, 0, sizeof(*req)); | |
1059 | ||
1060 | req->state[0] = SHA256_H0; | |
1061 | req->state[1] = SHA256_H1; | |
1062 | req->state[2] = SHA256_H2; | |
1063 | req->state[3] = SHA256_H3; | |
1064 | req->state[4] = SHA256_H4; | |
1065 | req->state[5] = SHA256_H5; | |
1066 | req->state[6] = SHA256_H6; | |
1067 | req->state[7] = SHA256_H7; | |
1068 | ||
1069 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; | |
b869648c | 1070 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1b44c5a6 AT |
1071 | req->state_sz = SHA256_DIGEST_SIZE; |
1072 | ||
1073 | return 0; | |
1074 | } | |
1075 | ||
static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot SHA256: reset the state, then hash all data at once. */
	ret = safexcel_sha256_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1085 | ||
/* Plain SHA256 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
1115 | ||
1116 | static int safexcel_sha224_init(struct ahash_request *areq) | |
1117 | { | |
1118 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1119 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1120 | ||
1121 | memset(req, 0, sizeof(*req)); | |
1122 | ||
1123 | req->state[0] = SHA224_H0; | |
1124 | req->state[1] = SHA224_H1; | |
1125 | req->state[2] = SHA224_H2; | |
1126 | req->state[3] = SHA224_H3; | |
1127 | req->state[4] = SHA224_H4; | |
1128 | req->state[5] = SHA224_H5; | |
1129 | req->state[6] = SHA224_H6; | |
1130 | req->state[7] = SHA224_H7; | |
1131 | ||
1132 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; | |
b869648c | 1133 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1b44c5a6 AT |
1134 | req->state_sz = SHA256_DIGEST_SIZE; |
1135 | ||
1136 | return 0; | |
1137 | } | |
1138 | ||
static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot SHA224: reset the state, then hash all data at once. */
	ret = safexcel_sha224_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1148 | ||
/* Plain SHA224 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
73f36ea7 | 1178 | |
3ad618d8 AT |
1179 | static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, |
1180 | unsigned int keylen) | |
1181 | { | |
1182 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224", | |
1183 | SHA256_DIGEST_SIZE); | |
1184 | } | |
1185 | ||
1186 | static int safexcel_hmac_sha224_init(struct ahash_request *areq) | |
1187 | { | |
1188 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1189 | ||
1190 | safexcel_sha224_init(areq); | |
1191 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1192 | return 0; | |
1193 | } | |
1194 | ||
static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot HMAC-SHA224: initialise the request, then hash all data. */
	ret = safexcel_hmac_sha224_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1204 | ||
/* HMAC-SHA224 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
1235 | ||
73f36ea7 AT |
1236 | static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, |
1237 | unsigned int keylen) | |
1238 | { | |
1239 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256", | |
1240 | SHA256_DIGEST_SIZE); | |
1241 | } | |
1242 | ||
1243 | static int safexcel_hmac_sha256_init(struct ahash_request *areq) | |
1244 | { | |
1245 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1246 | ||
1247 | safexcel_sha256_init(areq); | |
1248 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1249 | return 0; | |
1250 | } | |
1251 | ||
static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot HMAC-SHA256: initialise the request, then hash all data. */
	ret = safexcel_hmac_sha256_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1261 | ||
/* HMAC-SHA256 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
b460edb6 AT |
1292 | |
1293 | static int safexcel_sha512_init(struct ahash_request *areq) | |
1294 | { | |
1295 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1296 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1297 | ||
1298 | memset(req, 0, sizeof(*req)); | |
1299 | ||
1300 | req->state[0] = lower_32_bits(SHA512_H0); | |
1301 | req->state[1] = upper_32_bits(SHA512_H0); | |
1302 | req->state[2] = lower_32_bits(SHA512_H1); | |
1303 | req->state[3] = upper_32_bits(SHA512_H1); | |
1304 | req->state[4] = lower_32_bits(SHA512_H2); | |
1305 | req->state[5] = upper_32_bits(SHA512_H2); | |
1306 | req->state[6] = lower_32_bits(SHA512_H3); | |
1307 | req->state[7] = upper_32_bits(SHA512_H3); | |
1308 | req->state[8] = lower_32_bits(SHA512_H4); | |
1309 | req->state[9] = upper_32_bits(SHA512_H4); | |
1310 | req->state[10] = lower_32_bits(SHA512_H5); | |
1311 | req->state[11] = upper_32_bits(SHA512_H5); | |
1312 | req->state[12] = lower_32_bits(SHA512_H6); | |
1313 | req->state[13] = upper_32_bits(SHA512_H6); | |
1314 | req->state[14] = lower_32_bits(SHA512_H7); | |
1315 | req->state[15] = upper_32_bits(SHA512_H7); | |
1316 | ||
1317 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; | |
1318 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1319 | req->state_sz = SHA512_DIGEST_SIZE; | |
1320 | ||
1321 | return 0; | |
1322 | } | |
1323 | ||
static int safexcel_sha512_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot SHA512: reset the state, then hash all data at once. */
	ret = safexcel_sha512_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1333 | ||
/* Plain SHA512 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha512_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "safexcel-sha512",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
0de54fb1 | 1363 | |
9e46eafd AT |
1364 | static int safexcel_sha384_init(struct ahash_request *areq) |
1365 | { | |
1366 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1367 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1368 | ||
1369 | memset(req, 0, sizeof(*req)); | |
1370 | ||
1371 | req->state[0] = lower_32_bits(SHA384_H0); | |
1372 | req->state[1] = upper_32_bits(SHA384_H0); | |
1373 | req->state[2] = lower_32_bits(SHA384_H1); | |
1374 | req->state[3] = upper_32_bits(SHA384_H1); | |
1375 | req->state[4] = lower_32_bits(SHA384_H2); | |
1376 | req->state[5] = upper_32_bits(SHA384_H2); | |
1377 | req->state[6] = lower_32_bits(SHA384_H3); | |
1378 | req->state[7] = upper_32_bits(SHA384_H3); | |
1379 | req->state[8] = lower_32_bits(SHA384_H4); | |
1380 | req->state[9] = upper_32_bits(SHA384_H4); | |
1381 | req->state[10] = lower_32_bits(SHA384_H5); | |
1382 | req->state[11] = upper_32_bits(SHA384_H5); | |
1383 | req->state[12] = lower_32_bits(SHA384_H6); | |
1384 | req->state[13] = upper_32_bits(SHA384_H6); | |
1385 | req->state[14] = lower_32_bits(SHA384_H7); | |
1386 | req->state[15] = upper_32_bits(SHA384_H7); | |
1387 | ||
1388 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; | |
1389 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1390 | req->state_sz = SHA512_DIGEST_SIZE; | |
1391 | ||
1392 | return 0; | |
1393 | } | |
1394 | ||
static int safexcel_sha384_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot SHA384: reset the state, then hash all data at once. */
	ret = safexcel_sha384_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1404 | ||
/* Plain SHA384 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha384_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "safexcel-sha384",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
1434 | ||
0de54fb1 AT |
1435 | static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, |
1436 | unsigned int keylen) | |
1437 | { | |
1438 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512", | |
1439 | SHA512_DIGEST_SIZE); | |
1440 | } | |
1441 | ||
1442 | static int safexcel_hmac_sha512_init(struct ahash_request *areq) | |
1443 | { | |
1444 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1445 | ||
1446 | safexcel_sha512_init(areq); | |
1447 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1448 | return 0; | |
1449 | } | |
1450 | ||
static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot HMAC-SHA512: initialise the request, then hash all data. */
	ret = safexcel_hmac_sha512_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1460 | ||
/* HMAC-SHA512 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha512_digest,
		.setkey = safexcel_hmac_sha512_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "safexcel-hmac-sha512",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
1f5d5d98 AT |
1491 | |
1492 | static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1493 | unsigned int keylen) | |
1494 | { | |
1495 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384", | |
1496 | SHA512_DIGEST_SIZE); | |
1497 | } | |
1498 | ||
1499 | static int safexcel_hmac_sha384_init(struct ahash_request *areq) | |
1500 | { | |
1501 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1502 | ||
1503 | safexcel_sha384_init(areq); | |
1504 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1505 | return 0; | |
1506 | } | |
1507 | ||
static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot HMAC-SHA384: initialise the request, then hash all data. */
	ret = safexcel_hmac_sha384_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1517 | ||
/* HMAC-SHA384 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha384_digest,
		.setkey = safexcel_hmac_sha384_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "safexcel-hmac-sha384",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
293f89cf OH |
1548 | |
1549 | static int safexcel_md5_init(struct ahash_request *areq) | |
1550 | { | |
1551 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1552 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1553 | ||
1554 | memset(req, 0, sizeof(*req)); | |
1555 | ||
1556 | req->state[0] = MD5_H0; | |
1557 | req->state[1] = MD5_H1; | |
1558 | req->state[2] = MD5_H2; | |
1559 | req->state[3] = MD5_H3; | |
1560 | ||
1561 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; | |
1562 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1563 | req->state_sz = MD5_DIGEST_SIZE; | |
1564 | ||
1565 | return 0; | |
1566 | } | |
1567 | ||
static int safexcel_md5_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot MD5: reset the state, then hash all data at once. */
	ret = safexcel_md5_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1577 | ||
/* Plain MD5 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_md5_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "safexcel-md5",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
b471e4b9 OH |
1607 | |
1608 | static int safexcel_hmac_md5_init(struct ahash_request *areq) | |
1609 | { | |
1610 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1611 | ||
1612 | safexcel_md5_init(areq); | |
1613 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1614 | return 0; | |
1615 | } | |
1616 | ||
1617 | static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1618 | unsigned int keylen) | |
1619 | { | |
1620 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5", | |
1621 | MD5_DIGEST_SIZE); | |
1622 | } | |
1623 | ||
static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
	int ret;

	/* One-shot HMAC-MD5: initialise the request, then hash all data. */
	ret = safexcel_hmac_md5_init(areq);
	return ret ? ret : safexcel_ahash_finup(areq);
}
1633 | ||
/* HMAC-MD5 ahash, offloaded to the EIP97/EIP197 engines. */
struct safexcel_alg_template safexcel_alg_hmac_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_hmac_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_md5_digest,
		.setkey = safexcel_hmac_md5_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "safexcel-hmac-md5",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};