]>
Commit | Line | Data |
---|---|---|
301422e3 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1b44c5a6 AT |
2 | /* |
3 | * Copyright (C) 2017 Marvell | |
4 | * | |
5 | * Antoine Tenart <antoine.tenart@free-electrons.com> | |
1b44c5a6 AT |
6 | */ |
7 | ||
aed3731e | 8 | #include <crypto/hmac.h> |
293f89cf | 9 | #include <crypto/md5.h> |
1b44c5a6 AT |
10 | #include <crypto/sha.h> |
11 | #include <linux/device.h> | |
12 | #include <linux/dma-mapping.h> | |
13 | #include <linux/dmapool.h> | |
14 | ||
1b44c5a6 AT |
15 | #include "safexcel.h" |
16 | ||
/* Per-transform (tfm) hash context. */
struct safexcel_ahash_ctx {
	struct safexcel_context base;		/* common driver context (ring, ctxr, send/result hooks) */
	struct safexcel_crypto_priv *priv;	/* owning device instance */

	u32 alg;	/* CONTEXT_CONTROL_CRYPTO_ALG_* hash algorithm selector */

	/* Precomputed HMAC inner/outer pad digests, sized for the largest
	 * supported digest (SHA-512).
	 */
	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
26 | ||
/* Per-request hash state, stored in the ahash request context. */
struct safexcel_ahash_req {
	bool last_req;	/* final()/finup() was called: no more data will follow */
	bool finish;	/* produce the final digest on this operation */
	bool hmac;	/* request is an HMAC operation */
	bool needs_inv;	/* hardware context record must be invalidated first */

	int nents;		/* mapped src scatterlist entries, 0 when unmapped */
	dma_addr_t result_dma;	/* where the engine writes the (intermediate) digest */

	u32 digest;	/* CONTEXT_CONTROL_DIGEST_* type */

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	/* 128-bit byte counters: [0] is a u64 that wraps mod 2^64, [1] only
	 * records the carry (see safexcel_ahash_update()).
	 */
	u64 len[2];
	u64 processed[2];

	/* Bytes accepted but not yet pushed to the engine. */
	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;	/* DMA mapping of @cache, 0 when unmapped */
	unsigned int cache_sz;	/* mapped size of @cache */

	/* Staging area filled by send(), promoted to @cache on completion. */
	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
50 | ||
b460edb6 AT |
51 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) |
52 | { | |
53 | if (req->len[1] > req->processed[1]) | |
54 | return 0xffffffff - (req->len[0] - req->processed[0]); | |
55 | ||
56 | return req->len[0] - req->processed[0]; | |
57 | } | |
58 | ||
1b44c5a6 AT |
59 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, |
60 | u32 input_length, u32 result_length) | |
61 | { | |
62 | struct safexcel_token *token = | |
63 | (struct safexcel_token *)cdesc->control_data.token; | |
64 | ||
65 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | |
66 | token[0].packet_length = input_length; | |
67 | token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; | |
68 | token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; | |
69 | ||
70 | token[1].opcode = EIP197_TOKEN_OPCODE_INSERT; | |
71 | token[1].packet_length = result_length; | |
72 | token[1].stat = EIP197_TOKEN_STAT_LAST_HASH | | |
73 | EIP197_TOKEN_STAT_LAST_PACKET; | |
74 | token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | | |
75 | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | |
76 | } | |
77 | ||
/* Fill in the control words of the first command descriptor: algorithm,
 * digest type, context (state) size in 32-bit words and, when continuing
 * a previous operation, the saved intermediate digest plus block counter.
 */
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;

	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed[0] || req->processed[1]) {
			/* Continuation: context size is the digest words
			 * plus one extra word for the digest count.
			 */
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			/* First chunk: start the hash from its IV. */
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed[0] || req->processed[1]) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish) {
				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
					  req->processed[1]);

				/* This is a hardware limitation, as the
				 * counter must fit into an u32. This represents
				 * a fairly big amount of input data, so we
				 * shouldn't see this.
				 */
				if (unlikely(count & 0xffff0000)) {
					dev_warn(priv->dev,
						 "Input data is too big\n");
					return;
				}

				/* The count word goes right after the digest:
				 * i was left at digestsize / 4 by the loop.
				 */
				ctx->base.ctxr->data[i] = cpu_to_le32(count);
			}
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		/* HMAC: context holds the precomputed ipad + opad states. */
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
147 | ||
1eb7b403 OH |
/* Process a completed hash result descriptor: pop the result ring entry,
 * release every DMA resource held by the request and, on a finishing
 * operation, copy the digest out to the caller's result buffer.
 * Returns 1 (one result consumed); *ret carries the operation status.
 */
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	u64 cache_len;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	/* Unmap the source scatterlist, if send() mapped it. */
	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	/* Unmap the state buffer the engine wrote the digest to. */
	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	/* Unmap the cached-data buffer, if it was part of this chunk. */
	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
		sreq->cache_sz = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	/* Promote the bytes staged for the next chunk to the main cache. */
	cache_len = safexcel_queued_len(sreq);
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
201 | ||
1eb7b403 | 202 | static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, |
1eb7b403 | 203 | int *commands, int *results) |
1b44c5a6 AT |
204 | { |
205 | struct ahash_request *areq = ahash_request_cast(async); | |
206 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
207 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
208 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
209 | struct safexcel_crypto_priv *priv = ctx->priv; | |
210 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; | |
211 | struct safexcel_result_desc *rdesc; | |
212 | struct scatterlist *sg; | |
b460edb6 AT |
213 | int i, extra, n_cdesc = 0, ret = 0; |
214 | u64 queued, len, cache_len; | |
1b44c5a6 | 215 | |
b460edb6 | 216 | queued = len = safexcel_queued_len(req); |
666a9c70 | 217 | if (queued <= crypto_ahash_blocksize(ahash)) |
1b44c5a6 AT |
218 | cache_len = queued; |
219 | else | |
220 | cache_len = queued - areq->nbytes; | |
221 | ||
809778e0 AT |
222 | if (!req->last_req) { |
223 | /* If this is not the last request and the queued data does not | |
224 | * fit into full blocks, cache it for the next send() call. | |
225 | */ | |
226 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); | |
dd4306a6 AT |
227 | |
228 | /* If this is not the last request and the queued data | |
229 | * is a multiple of a block, cache the last one for now. | |
230 | */ | |
809778e0 | 231 | if (!extra) |
c1a8fa6e | 232 | extra = crypto_ahash_blocksize(ahash); |
809778e0 | 233 | |
709ecc10 AT |
234 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
235 | req->cache_next, extra, | |
236 | areq->nbytes - extra); | |
237 | ||
238 | queued -= extra; | |
239 | len -= extra; | |
240 | ||
241 | if (!queued) { | |
242 | *commands = 0; | |
243 | *results = 0; | |
244 | return 0; | |
809778e0 | 245 | } |
1b44c5a6 AT |
246 | } |
247 | ||
1b44c5a6 AT |
248 | /* Add a command descriptor for the cached data, if any */ |
249 | if (cache_len) { | |
cff9a175 AT |
250 | req->cache_dma = dma_map_single(priv->dev, req->cache, |
251 | cache_len, DMA_TO_DEVICE); | |
9744fec9 | 252 | if (dma_mapping_error(priv->dev, req->cache_dma)) |
cff9a175 | 253 | return -EINVAL; |
1b44c5a6 | 254 | |
cff9a175 | 255 | req->cache_sz = cache_len; |
1b44c5a6 AT |
256 | first_cdesc = safexcel_add_cdesc(priv, ring, 1, |
257 | (cache_len == len), | |
cff9a175 | 258 | req->cache_dma, cache_len, len, |
1b44c5a6 AT |
259 | ctx->base.ctxr_dma); |
260 | if (IS_ERR(first_cdesc)) { | |
261 | ret = PTR_ERR(first_cdesc); | |
262 | goto unmap_cache; | |
263 | } | |
264 | n_cdesc++; | |
265 | ||
266 | queued -= cache_len; | |
267 | if (!queued) | |
268 | goto send_command; | |
269 | } | |
270 | ||
271 | /* Now handle the current ahash request buffer(s) */ | |
c957f8b3 AT |
272 | req->nents = dma_map_sg(priv->dev, areq->src, |
273 | sg_nents_for_len(areq->src, areq->nbytes), | |
274 | DMA_TO_DEVICE); | |
275 | if (!req->nents) { | |
1b44c5a6 AT |
276 | ret = -ENOMEM; |
277 | goto cdesc_rollback; | |
278 | } | |
279 | ||
c957f8b3 | 280 | for_each_sg(areq->src, sg, req->nents, i) { |
1b44c5a6 AT |
281 | int sglen = sg_dma_len(sg); |
282 | ||
283 | /* Do not overflow the request */ | |
b460edb6 | 284 | if (queued < sglen) |
1b44c5a6 AT |
285 | sglen = queued; |
286 | ||
287 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, | |
288 | !(queued - sglen), sg_dma_address(sg), | |
289 | sglen, len, ctx->base.ctxr_dma); | |
290 | if (IS_ERR(cdesc)) { | |
291 | ret = PTR_ERR(cdesc); | |
57433b58 | 292 | goto unmap_sg; |
1b44c5a6 AT |
293 | } |
294 | n_cdesc++; | |
295 | ||
296 | if (n_cdesc == 1) | |
297 | first_cdesc = cdesc; | |
298 | ||
299 | queued -= sglen; | |
300 | if (!queued) | |
301 | break; | |
302 | } | |
303 | ||
304 | send_command: | |
305 | /* Setup the context options */ | |
25bc9551 | 306 | safexcel_context_control(ctx, req, first_cdesc, req->state_sz); |
1b44c5a6 AT |
307 | |
308 | /* Add the token */ | |
309 | safexcel_hash_token(first_cdesc, len, req->state_sz); | |
310 | ||
b8592027 OH |
311 | req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz, |
312 | DMA_FROM_DEVICE); | |
313 | if (dma_mapping_error(priv->dev, req->result_dma)) { | |
1b44c5a6 | 314 | ret = -EINVAL; |
57433b58 | 315 | goto unmap_sg; |
1b44c5a6 AT |
316 | } |
317 | ||
318 | /* Add a result descriptor */ | |
b8592027 | 319 | rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma, |
1b44c5a6 AT |
320 | req->state_sz); |
321 | if (IS_ERR(rdesc)) { | |
322 | ret = PTR_ERR(rdesc); | |
57240a78 | 323 | goto unmap_result; |
1b44c5a6 AT |
324 | } |
325 | ||
9744fec9 | 326 | safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); |
1b44c5a6 | 327 | |
b460edb6 AT |
328 | req->processed[0] += len; |
329 | if (req->processed[0] < len) | |
330 | req->processed[1]++; | |
331 | ||
1b44c5a6 AT |
332 | *commands = n_cdesc; |
333 | *results = 1; | |
334 | return 0; | |
335 | ||
57240a78 | 336 | unmap_result: |
57433b58 AT |
337 | dma_unmap_single(priv->dev, req->result_dma, req->state_sz, |
338 | DMA_FROM_DEVICE); | |
339 | unmap_sg: | |
57240a78 | 340 | dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE); |
1b44c5a6 AT |
341 | cdesc_rollback: |
342 | for (i = 0; i < n_cdesc; i++) | |
343 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | |
344 | unmap_cache: | |
cff9a175 AT |
345 | if (req->cache_dma) { |
346 | dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz, | |
347 | DMA_TO_DEVICE); | |
aa524286 | 348 | req->cache_dma = 0; |
cff9a175 | 349 | req->cache_sz = 0; |
1b44c5a6 | 350 | } |
1b44c5a6 | 351 | |
1b44c5a6 AT |
352 | return ret; |
353 | } | |
354 | ||
/* Check whether the hardware context record still matches the current
 * request state (digest words and block counter); if not, the engine's
 * cached record must be invalidated before reuse.
 */
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	u64 processed;
	int i;

	/* Number of blocks processed so far, as the engine counts them. */
	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	/* The block counter sits right after the digest words. */
	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
		return true;

	return false;
}
375 | ||
/* Handle completion of a context-invalidation operation: free the context
 * record when the tfm is being torn down, otherwise rebalance and requeue
 * the request that triggered the invalidation.
 */
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (ctx->base.exit_inv) {
		/* tfm teardown path: the context record is done for. */
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	/* Pick a (possibly new) ring and requeue the original request. */
	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	/* The request is back in flight: do not complete it yet. */
	*should_complete = false;

	return 1;
}
425 | ||
1eb7b403 OH |
426 | static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, |
427 | struct crypto_async_request *async, | |
428 | bool *should_complete, int *ret) | |
429 | { | |
430 | struct ahash_request *areq = ahash_request_cast(async); | |
431 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
432 | int err; | |
433 | ||
53c83e91 | 434 | BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv); |
871df319 | 435 | |
1eb7b403 OH |
436 | if (req->needs_inv) { |
437 | req->needs_inv = false; | |
438 | err = safexcel_handle_inv_result(priv, ring, async, | |
439 | should_complete, ret); | |
440 | } else { | |
441 | err = safexcel_handle_req_result(priv, ring, async, | |
442 | should_complete, ret); | |
443 | } | |
444 | ||
445 | return err; | |
446 | } | |
447 | ||
1b44c5a6 | 448 | static int safexcel_ahash_send_inv(struct crypto_async_request *async, |
9744fec9 | 449 | int ring, int *commands, int *results) |
1b44c5a6 AT |
450 | { |
451 | struct ahash_request *areq = ahash_request_cast(async); | |
452 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
453 | int ret; | |
454 | ||
5290ad6e | 455 | ret = safexcel_invalidate_cache(async, ctx->priv, |
9744fec9 | 456 | ctx->base.ctxr_dma, ring); |
1b44c5a6 AT |
457 | if (unlikely(ret)) |
458 | return ret; | |
459 | ||
460 | *commands = 1; | |
461 | *results = 1; | |
462 | ||
463 | return 0; | |
464 | } | |
465 | ||
1eb7b403 | 466 | static int safexcel_ahash_send(struct crypto_async_request *async, |
9744fec9 | 467 | int ring, int *commands, int *results) |
1eb7b403 OH |
468 | { |
469 | struct ahash_request *areq = ahash_request_cast(async); | |
470 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
471 | int ret; | |
472 | ||
473 | if (req->needs_inv) | |
9744fec9 | 474 | ret = safexcel_ahash_send_inv(async, ring, commands, results); |
1eb7b403 | 475 | else |
9744fec9 OH |
476 | ret = safexcel_ahash_send_req(async, ring, commands, results); |
477 | ||
1eb7b403 OH |
478 | return ret; |
479 | } | |
480 | ||
1b44c5a6 AT |
/* Synchronously invalidate the engine's cached context record for @tfm:
 * build an on-stack dummy request flagged needs_inv/exit_inv, queue it on
 * the tfm's current ring and wait for its completion callback.
 */
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	/* Block until the invalidation round-trips through the engine. */
	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
519 | ||
cc75f5ce AT |
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, aka. when there is at least 1 block size in the pipe.
 *
 * Returns the number of bytes cached, or -E2BIG when the data does not
 * fit in the cache and must be sent to the engine instead.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	u64 queued, cache_len;

	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = safexcel_queued_len(req);
	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = queued - areq->nbytes;

	/*
	 * In case there isn't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}
553 | ||
/* Hand the request over to a ring: allocate the hardware context record
 * on first use, decide whether the engine's cached copy of it must be
 * invalidated first, then enqueue and kick the ring worker.
 */
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
		    (req->processed[0] || req->processed[1]) &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		/* First use of this tfm: allocate its context record. */
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
598 | ||
599 | static int safexcel_ahash_update(struct ahash_request *areq) | |
600 | { | |
1b44c5a6 AT |
601 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
602 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
603 | ||
604 | /* If the request is 0 length, do nothing */ | |
605 | if (!areq->nbytes) | |
606 | return 0; | |
607 | ||
b460edb6 AT |
608 | req->len[0] += areq->nbytes; |
609 | if (req->len[0] < areq->nbytes) | |
610 | req->len[1]++; | |
1b44c5a6 AT |
611 | |
612 | safexcel_ahash_cache(areq); | |
613 | ||
614 | /* | |
615 | * We're not doing partial updates when performing an hmac request. | |
616 | * Everything will be handled by the final() call. | |
617 | */ | |
b869648c | 618 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) |
1b44c5a6 AT |
619 | return 0; |
620 | ||
621 | if (req->hmac) | |
622 | return safexcel_ahash_enqueue(areq); | |
623 | ||
624 | if (!req->last_req && | |
b460edb6 | 625 | safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) |
1b44c5a6 AT |
626 | return safexcel_ahash_enqueue(areq); |
627 | ||
628 | return 0; | |
629 | } | |
630 | ||
631 | static int safexcel_ahash_final(struct ahash_request *areq) | |
632 | { | |
633 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
634 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
635 | ||
636 | req->last_req = true; | |
637 | req->finish = true; | |
638 | ||
639 | /* If we have an overall 0 length request */ | |
b460edb6 | 640 | if (!req->len[0] && !req->len[1] && !areq->nbytes) { |
293f89cf OH |
641 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) |
642 | memcpy(areq->result, md5_zero_message_hash, | |
643 | MD5_DIGEST_SIZE); | |
644 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) | |
1b44c5a6 AT |
645 | memcpy(areq->result, sha1_zero_message_hash, |
646 | SHA1_DIGEST_SIZE); | |
647 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224) | |
648 | memcpy(areq->result, sha224_zero_message_hash, | |
649 | SHA224_DIGEST_SIZE); | |
650 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) | |
651 | memcpy(areq->result, sha256_zero_message_hash, | |
652 | SHA256_DIGEST_SIZE); | |
9e46eafd AT |
653 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384) |
654 | memcpy(areq->result, sha384_zero_message_hash, | |
655 | SHA384_DIGEST_SIZE); | |
b460edb6 AT |
656 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) |
657 | memcpy(areq->result, sha512_zero_message_hash, | |
658 | SHA512_DIGEST_SIZE); | |
1b44c5a6 AT |
659 | |
660 | return 0; | |
661 | } | |
662 | ||
663 | return safexcel_ahash_enqueue(areq); | |
664 | } | |
665 | ||
666 | static int safexcel_ahash_finup(struct ahash_request *areq) | |
667 | { | |
668 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
669 | ||
670 | req->last_req = true; | |
671 | req->finish = true; | |
672 | ||
673 | safexcel_ahash_update(areq); | |
674 | return safexcel_ahash_final(areq); | |
675 | } | |
676 | ||
677 | static int safexcel_ahash_export(struct ahash_request *areq, void *out) | |
678 | { | |
679 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
680 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
681 | struct safexcel_ahash_export_state *export = out; | |
682 | ||
b460edb6 AT |
683 | export->len[0] = req->len[0]; |
684 | export->len[1] = req->len[1]; | |
685 | export->processed[0] = req->processed[0]; | |
686 | export->processed[1] = req->processed[1]; | |
1b44c5a6 | 687 | |
b869648c AT |
688 | export->digest = req->digest; |
689 | ||
1b44c5a6 | 690 | memcpy(export->state, req->state, req->state_sz); |
1b44c5a6 AT |
691 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); |
692 | ||
693 | return 0; | |
694 | } | |
695 | ||
696 | static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | |
697 | { | |
698 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | |
699 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
700 | const struct safexcel_ahash_export_state *export = in; | |
701 | int ret; | |
702 | ||
703 | ret = crypto_ahash_init(areq); | |
704 | if (ret) | |
705 | return ret; | |
706 | ||
b460edb6 AT |
707 | req->len[0] = export->len[0]; |
708 | req->len[1] = export->len[1]; | |
709 | req->processed[0] = export->processed[0]; | |
710 | req->processed[1] = export->processed[1]; | |
1b44c5a6 | 711 | |
b869648c AT |
712 | req->digest = export->digest; |
713 | ||
1b44c5a6 AT |
714 | memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); |
715 | memcpy(req->state, export->state, req->state_sz); | |
716 | ||
717 | return 0; | |
718 | } | |
719 | ||
720 | static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) | |
721 | { | |
722 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); | |
723 | struct safexcel_alg_template *tmpl = | |
724 | container_of(__crypto_ahash_alg(tfm->__crt_alg), | |
725 | struct safexcel_alg_template, alg.ahash); | |
726 | ||
727 | ctx->priv = tmpl->priv; | |
1eb7b403 OH |
728 | ctx->base.send = safexcel_ahash_send; |
729 | ctx->base.handle_result = safexcel_handle_result; | |
1b44c5a6 AT |
730 | |
731 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
732 | sizeof(struct safexcel_ahash_req)); | |
733 | return 0; | |
734 | } | |
735 | ||
736 | static int safexcel_sha1_init(struct ahash_request *areq) | |
737 | { | |
738 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
739 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
740 | ||
741 | memset(req, 0, sizeof(*req)); | |
742 | ||
743 | req->state[0] = SHA1_H0; | |
744 | req->state[1] = SHA1_H1; | |
745 | req->state[2] = SHA1_H2; | |
746 | req->state[3] = SHA1_H3; | |
747 | req->state[4] = SHA1_H4; | |
748 | ||
749 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; | |
b869648c | 750 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1b44c5a6 AT |
751 | req->state_sz = SHA1_DIGEST_SIZE; |
752 | ||
753 | return 0; | |
754 | } | |
755 | ||
/* .digest handler for plain SHA1: init + finup in one shot. */
static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	return ret ? ret : safexcel_ahash_finup(areq);
}
765 | ||
/* .cra_exit handler: release the hardware context record, invalidating
 * the engine's cached copy first when a record cache (TRC) is present.
 */
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		/* Invalidation frees the record on completion. */
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
785 | ||
/* Exported algorithm descriptor for plain SHA1, registered by the core
 * driver on the engines listed in .engines.
 */
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.engines = EIP97IES | EIP197B | EIP197D,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
815 | ||
816 | static int safexcel_hmac_sha1_init(struct ahash_request *areq) | |
817 | { | |
b869648c | 818 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
1b44c5a6 AT |
819 | |
820 | safexcel_sha1_init(areq); | |
b869648c | 821 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; |
1b44c5a6 AT |
822 | return 0; |
823 | } | |
824 | ||
/* .digest handler for HMAC-SHA1: init + finup in one shot. */
static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	return ret ? ret : safexcel_ahash_finup(areq);
}
834 | ||
835 | struct safexcel_ahash_result { | |
836 | struct completion completion; | |
837 | int error; | |
838 | }; | |
839 | ||
840 | static void safexcel_ahash_complete(struct crypto_async_request *req, int error) | |
841 | { | |
842 | struct safexcel_ahash_result *result = req->data; | |
843 | ||
844 | if (error == -EINPROGRESS) | |
845 | return; | |
846 | ||
847 | result->error = error; | |
848 | complete(&result->completion); | |
849 | } | |
850 | ||
851 | static int safexcel_hmac_init_pad(struct ahash_request *areq, | |
852 | unsigned int blocksize, const u8 *key, | |
853 | unsigned int keylen, u8 *ipad, u8 *opad) | |
854 | { | |
855 | struct safexcel_ahash_result result; | |
856 | struct scatterlist sg; | |
857 | int ret, i; | |
858 | u8 *keydup; | |
859 | ||
860 | if (keylen <= blocksize) { | |
861 | memcpy(ipad, key, keylen); | |
862 | } else { | |
863 | keydup = kmemdup(key, keylen, GFP_KERNEL); | |
864 | if (!keydup) | |
865 | return -ENOMEM; | |
866 | ||
867 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | |
868 | safexcel_ahash_complete, &result); | |
869 | sg_init_one(&sg, keydup, keylen); | |
870 | ahash_request_set_crypt(areq, &sg, ipad, keylen); | |
871 | init_completion(&result.completion); | |
872 | ||
873 | ret = crypto_ahash_digest(areq); | |
4dc5475a | 874 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
1b44c5a6 AT |
875 | wait_for_completion_interruptible(&result.completion); |
876 | ret = result.error; | |
877 | } | |
878 | ||
879 | /* Avoid leaking */ | |
880 | memzero_explicit(keydup, keylen); | |
881 | kfree(keydup); | |
882 | ||
883 | if (ret) | |
884 | return ret; | |
885 | ||
886 | keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); | |
887 | } | |
888 | ||
889 | memset(ipad + keylen, 0, blocksize - keylen); | |
890 | memcpy(opad, ipad, blocksize); | |
891 | ||
892 | for (i = 0; i < blocksize; i++) { | |
aed3731e AT |
893 | ipad[i] ^= HMAC_IPAD_VALUE; |
894 | opad[i] ^= HMAC_OPAD_VALUE; | |
1b44c5a6 AT |
895 | } |
896 | ||
897 | return 0; | |
898 | } | |
899 | ||
900 | static int safexcel_hmac_init_iv(struct ahash_request *areq, | |
901 | unsigned int blocksize, u8 *pad, void *state) | |
902 | { | |
903 | struct safexcel_ahash_result result; | |
904 | struct safexcel_ahash_req *req; | |
905 | struct scatterlist sg; | |
906 | int ret; | |
907 | ||
908 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | |
909 | safexcel_ahash_complete, &result); | |
910 | sg_init_one(&sg, pad, blocksize); | |
911 | ahash_request_set_crypt(areq, &sg, pad, blocksize); | |
912 | init_completion(&result.completion); | |
913 | ||
914 | ret = crypto_ahash_init(areq); | |
915 | if (ret) | |
916 | return ret; | |
917 | ||
918 | req = ahash_request_ctx(areq); | |
919 | req->hmac = true; | |
920 | req->last_req = true; | |
921 | ||
922 | ret = crypto_ahash_update(areq); | |
12bf4142 | 923 | if (ret && ret != -EINPROGRESS && ret != -EBUSY) |
1b44c5a6 AT |
924 | return ret; |
925 | ||
926 | wait_for_completion_interruptible(&result.completion); | |
927 | if (result.error) | |
928 | return result.error; | |
929 | ||
930 | return crypto_ahash_export(areq, state); | |
931 | } | |
932 | ||
f6beaea3 AT |
933 | int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, |
934 | void *istate, void *ostate) | |
1b44c5a6 AT |
935 | { |
936 | struct ahash_request *areq; | |
937 | struct crypto_ahash *tfm; | |
938 | unsigned int blocksize; | |
939 | u8 *ipad, *opad; | |
940 | int ret; | |
941 | ||
85d7311f | 942 | tfm = crypto_alloc_ahash(alg, 0, 0); |
1b44c5a6 AT |
943 | if (IS_ERR(tfm)) |
944 | return PTR_ERR(tfm); | |
945 | ||
946 | areq = ahash_request_alloc(tfm, GFP_KERNEL); | |
947 | if (!areq) { | |
948 | ret = -ENOMEM; | |
949 | goto free_ahash; | |
950 | } | |
951 | ||
952 | crypto_ahash_clear_flags(tfm, ~0); | |
953 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | |
954 | ||
6396bb22 | 955 | ipad = kcalloc(2, blocksize, GFP_KERNEL); |
1b44c5a6 AT |
956 | if (!ipad) { |
957 | ret = -ENOMEM; | |
958 | goto free_request; | |
959 | } | |
960 | ||
961 | opad = ipad + blocksize; | |
962 | ||
963 | ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad); | |
964 | if (ret) | |
965 | goto free_ipad; | |
966 | ||
967 | ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate); | |
968 | if (ret) | |
969 | goto free_ipad; | |
970 | ||
971 | ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate); | |
972 | ||
973 | free_ipad: | |
974 | kfree(ipad); | |
975 | free_request: | |
976 | ahash_request_free(areq); | |
977 | free_ahash: | |
978 | crypto_free_ahash(tfm); | |
979 | ||
980 | return ret; | |
981 | } | |
982 | ||
73f36ea7 AT |
983 | static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, |
984 | unsigned int keylen, const char *alg, | |
985 | unsigned int state_sz) | |
1b44c5a6 AT |
986 | { |
987 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | |
871df319 | 988 | struct safexcel_crypto_priv *priv = ctx->priv; |
1b44c5a6 AT |
989 | struct safexcel_ahash_export_state istate, ostate; |
990 | int ret, i; | |
991 | ||
73f36ea7 | 992 | ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); |
1b44c5a6 AT |
993 | if (ret) |
994 | return ret; | |
995 | ||
53c83e91 | 996 | if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { |
73f36ea7 | 997 | for (i = 0; i < state_sz / sizeof(u32); i++) { |
c4daf4cc OH |
998 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || |
999 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | |
1000 | ctx->base.needs_inv = true; | |
1001 | break; | |
1002 | } | |
1b44c5a6 AT |
1003 | } |
1004 | } | |
1005 | ||
73f36ea7 AT |
1006 | memcpy(ctx->ipad, &istate.state, state_sz); |
1007 | memcpy(ctx->opad, &ostate.state, state_sz); | |
42ef3bed | 1008 | |
1b44c5a6 AT |
1009 | return 0; |
1010 | } | |
1011 | ||
73f36ea7 AT |
1012 | static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, |
1013 | unsigned int keylen) | |
1014 | { | |
1015 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1", | |
1016 | SHA1_DIGEST_SIZE); | |
1017 | } | |
1018 | ||
1b44c5a6 AT |
1019 | struct safexcel_alg_template safexcel_alg_hmac_sha1 = { |
1020 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1021 | .engines = EIP97IES | EIP197B | EIP197D, |
1b44c5a6 AT |
1022 | .alg.ahash = { |
1023 | .init = safexcel_hmac_sha1_init, | |
1024 | .update = safexcel_ahash_update, | |
1025 | .final = safexcel_ahash_final, | |
1026 | .finup = safexcel_ahash_finup, | |
1027 | .digest = safexcel_hmac_sha1_digest, | |
1028 | .setkey = safexcel_hmac_sha1_setkey, | |
1029 | .export = safexcel_ahash_export, | |
1030 | .import = safexcel_ahash_import, | |
1031 | .halg = { | |
1032 | .digestsize = SHA1_DIGEST_SIZE, | |
1033 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1034 | .base = { | |
1035 | .cra_name = "hmac(sha1)", | |
1036 | .cra_driver_name = "safexcel-hmac-sha1", | |
1037 | .cra_priority = 300, | |
1038 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1039 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1040 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
1041 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1042 | .cra_init = safexcel_ahash_cra_init, | |
1043 | .cra_exit = safexcel_ahash_cra_exit, | |
1044 | .cra_module = THIS_MODULE, | |
1045 | }, | |
1046 | }, | |
1047 | }, | |
1048 | }; | |
1049 | ||
1050 | static int safexcel_sha256_init(struct ahash_request *areq) | |
1051 | { | |
1052 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1053 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1054 | ||
1055 | memset(req, 0, sizeof(*req)); | |
1056 | ||
1057 | req->state[0] = SHA256_H0; | |
1058 | req->state[1] = SHA256_H1; | |
1059 | req->state[2] = SHA256_H2; | |
1060 | req->state[3] = SHA256_H3; | |
1061 | req->state[4] = SHA256_H4; | |
1062 | req->state[5] = SHA256_H5; | |
1063 | req->state[6] = SHA256_H6; | |
1064 | req->state[7] = SHA256_H7; | |
1065 | ||
1066 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; | |
b869648c | 1067 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1b44c5a6 AT |
1068 | req->state_sz = SHA256_DIGEST_SIZE; |
1069 | ||
1070 | return 0; | |
1071 | } | |
1072 | ||
1073 | static int safexcel_sha256_digest(struct ahash_request *areq) | |
1074 | { | |
1075 | int ret = safexcel_sha256_init(areq); | |
1076 | ||
1077 | if (ret) | |
1078 | return ret; | |
1079 | ||
1080 | return safexcel_ahash_finup(areq); | |
1081 | } | |
1082 | ||
1083 | struct safexcel_alg_template safexcel_alg_sha256 = { | |
1084 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1085 | .engines = EIP97IES | EIP197B | EIP197D, |
1b44c5a6 AT |
1086 | .alg.ahash = { |
1087 | .init = safexcel_sha256_init, | |
1088 | .update = safexcel_ahash_update, | |
1089 | .final = safexcel_ahash_final, | |
1090 | .finup = safexcel_ahash_finup, | |
1091 | .digest = safexcel_sha256_digest, | |
1092 | .export = safexcel_ahash_export, | |
1093 | .import = safexcel_ahash_import, | |
1094 | .halg = { | |
1095 | .digestsize = SHA256_DIGEST_SIZE, | |
1096 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1097 | .base = { | |
1098 | .cra_name = "sha256", | |
1099 | .cra_driver_name = "safexcel-sha256", | |
1100 | .cra_priority = 300, | |
1101 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1102 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1103 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
1104 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1105 | .cra_init = safexcel_ahash_cra_init, | |
1106 | .cra_exit = safexcel_ahash_cra_exit, | |
1107 | .cra_module = THIS_MODULE, | |
1108 | }, | |
1109 | }, | |
1110 | }, | |
1111 | }; | |
1112 | ||
1113 | static int safexcel_sha224_init(struct ahash_request *areq) | |
1114 | { | |
1115 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1116 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1117 | ||
1118 | memset(req, 0, sizeof(*req)); | |
1119 | ||
1120 | req->state[0] = SHA224_H0; | |
1121 | req->state[1] = SHA224_H1; | |
1122 | req->state[2] = SHA224_H2; | |
1123 | req->state[3] = SHA224_H3; | |
1124 | req->state[4] = SHA224_H4; | |
1125 | req->state[5] = SHA224_H5; | |
1126 | req->state[6] = SHA224_H6; | |
1127 | req->state[7] = SHA224_H7; | |
1128 | ||
1129 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; | |
b869648c | 1130 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; |
1b44c5a6 AT |
1131 | req->state_sz = SHA256_DIGEST_SIZE; |
1132 | ||
1133 | return 0; | |
1134 | } | |
1135 | ||
1136 | static int safexcel_sha224_digest(struct ahash_request *areq) | |
1137 | { | |
1138 | int ret = safexcel_sha224_init(areq); | |
1139 | ||
1140 | if (ret) | |
1141 | return ret; | |
1142 | ||
1143 | return safexcel_ahash_finup(areq); | |
1144 | } | |
1145 | ||
1146 | struct safexcel_alg_template safexcel_alg_sha224 = { | |
1147 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1148 | .engines = EIP97IES | EIP197B | EIP197D, |
1b44c5a6 AT |
1149 | .alg.ahash = { |
1150 | .init = safexcel_sha224_init, | |
1151 | .update = safexcel_ahash_update, | |
1152 | .final = safexcel_ahash_final, | |
1153 | .finup = safexcel_ahash_finup, | |
1154 | .digest = safexcel_sha224_digest, | |
1155 | .export = safexcel_ahash_export, | |
1156 | .import = safexcel_ahash_import, | |
1157 | .halg = { | |
1158 | .digestsize = SHA224_DIGEST_SIZE, | |
1159 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1160 | .base = { | |
1161 | .cra_name = "sha224", | |
1162 | .cra_driver_name = "safexcel-sha224", | |
1163 | .cra_priority = 300, | |
1164 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1165 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1166 | .cra_blocksize = SHA224_BLOCK_SIZE, | |
1167 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1168 | .cra_init = safexcel_ahash_cra_init, | |
1169 | .cra_exit = safexcel_ahash_cra_exit, | |
1170 | .cra_module = THIS_MODULE, | |
1171 | }, | |
1172 | }, | |
1173 | }, | |
1174 | }; | |
73f36ea7 | 1175 | |
3ad618d8 AT |
1176 | static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key, |
1177 | unsigned int keylen) | |
1178 | { | |
1179 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224", | |
1180 | SHA256_DIGEST_SIZE); | |
1181 | } | |
1182 | ||
1183 | static int safexcel_hmac_sha224_init(struct ahash_request *areq) | |
1184 | { | |
1185 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1186 | ||
1187 | safexcel_sha224_init(areq); | |
1188 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1189 | return 0; | |
1190 | } | |
1191 | ||
1192 | static int safexcel_hmac_sha224_digest(struct ahash_request *areq) | |
1193 | { | |
1194 | int ret = safexcel_hmac_sha224_init(areq); | |
1195 | ||
1196 | if (ret) | |
1197 | return ret; | |
1198 | ||
1199 | return safexcel_ahash_finup(areq); | |
1200 | } | |
1201 | ||
1202 | struct safexcel_alg_template safexcel_alg_hmac_sha224 = { | |
1203 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1204 | .engines = EIP97IES | EIP197B | EIP197D, |
3ad618d8 AT |
1205 | .alg.ahash = { |
1206 | .init = safexcel_hmac_sha224_init, | |
1207 | .update = safexcel_ahash_update, | |
1208 | .final = safexcel_ahash_final, | |
1209 | .finup = safexcel_ahash_finup, | |
1210 | .digest = safexcel_hmac_sha224_digest, | |
1211 | .setkey = safexcel_hmac_sha224_setkey, | |
1212 | .export = safexcel_ahash_export, | |
1213 | .import = safexcel_ahash_import, | |
1214 | .halg = { | |
1215 | .digestsize = SHA224_DIGEST_SIZE, | |
1216 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1217 | .base = { | |
1218 | .cra_name = "hmac(sha224)", | |
1219 | .cra_driver_name = "safexcel-hmac-sha224", | |
1220 | .cra_priority = 300, | |
1221 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1222 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1223 | .cra_blocksize = SHA224_BLOCK_SIZE, | |
1224 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1225 | .cra_init = safexcel_ahash_cra_init, | |
1226 | .cra_exit = safexcel_ahash_cra_exit, | |
1227 | .cra_module = THIS_MODULE, | |
1228 | }, | |
1229 | }, | |
1230 | }, | |
1231 | }; | |
1232 | ||
73f36ea7 AT |
1233 | static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, |
1234 | unsigned int keylen) | |
1235 | { | |
1236 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256", | |
1237 | SHA256_DIGEST_SIZE); | |
1238 | } | |
1239 | ||
1240 | static int safexcel_hmac_sha256_init(struct ahash_request *areq) | |
1241 | { | |
1242 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1243 | ||
1244 | safexcel_sha256_init(areq); | |
1245 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1246 | return 0; | |
1247 | } | |
1248 | ||
1249 | static int safexcel_hmac_sha256_digest(struct ahash_request *areq) | |
1250 | { | |
1251 | int ret = safexcel_hmac_sha256_init(areq); | |
1252 | ||
1253 | if (ret) | |
1254 | return ret; | |
1255 | ||
1256 | return safexcel_ahash_finup(areq); | |
1257 | } | |
1258 | ||
1259 | struct safexcel_alg_template safexcel_alg_hmac_sha256 = { | |
1260 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1261 | .engines = EIP97IES | EIP197B | EIP197D, |
73f36ea7 AT |
1262 | .alg.ahash = { |
1263 | .init = safexcel_hmac_sha256_init, | |
1264 | .update = safexcel_ahash_update, | |
1265 | .final = safexcel_ahash_final, | |
1266 | .finup = safexcel_ahash_finup, | |
1267 | .digest = safexcel_hmac_sha256_digest, | |
1268 | .setkey = safexcel_hmac_sha256_setkey, | |
1269 | .export = safexcel_ahash_export, | |
1270 | .import = safexcel_ahash_import, | |
1271 | .halg = { | |
1272 | .digestsize = SHA256_DIGEST_SIZE, | |
1273 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1274 | .base = { | |
1275 | .cra_name = "hmac(sha256)", | |
1276 | .cra_driver_name = "safexcel-hmac-sha256", | |
1277 | .cra_priority = 300, | |
1278 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1279 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1280 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
1281 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1282 | .cra_init = safexcel_ahash_cra_init, | |
1283 | .cra_exit = safexcel_ahash_cra_exit, | |
1284 | .cra_module = THIS_MODULE, | |
1285 | }, | |
1286 | }, | |
1287 | }, | |
1288 | }; | |
b460edb6 AT |
1289 | |
1290 | static int safexcel_sha512_init(struct ahash_request *areq) | |
1291 | { | |
1292 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1293 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1294 | ||
1295 | memset(req, 0, sizeof(*req)); | |
1296 | ||
1297 | req->state[0] = lower_32_bits(SHA512_H0); | |
1298 | req->state[1] = upper_32_bits(SHA512_H0); | |
1299 | req->state[2] = lower_32_bits(SHA512_H1); | |
1300 | req->state[3] = upper_32_bits(SHA512_H1); | |
1301 | req->state[4] = lower_32_bits(SHA512_H2); | |
1302 | req->state[5] = upper_32_bits(SHA512_H2); | |
1303 | req->state[6] = lower_32_bits(SHA512_H3); | |
1304 | req->state[7] = upper_32_bits(SHA512_H3); | |
1305 | req->state[8] = lower_32_bits(SHA512_H4); | |
1306 | req->state[9] = upper_32_bits(SHA512_H4); | |
1307 | req->state[10] = lower_32_bits(SHA512_H5); | |
1308 | req->state[11] = upper_32_bits(SHA512_H5); | |
1309 | req->state[12] = lower_32_bits(SHA512_H6); | |
1310 | req->state[13] = upper_32_bits(SHA512_H6); | |
1311 | req->state[14] = lower_32_bits(SHA512_H7); | |
1312 | req->state[15] = upper_32_bits(SHA512_H7); | |
1313 | ||
1314 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; | |
1315 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1316 | req->state_sz = SHA512_DIGEST_SIZE; | |
1317 | ||
1318 | return 0; | |
1319 | } | |
1320 | ||
1321 | static int safexcel_sha512_digest(struct ahash_request *areq) | |
1322 | { | |
1323 | int ret = safexcel_sha512_init(areq); | |
1324 | ||
1325 | if (ret) | |
1326 | return ret; | |
1327 | ||
1328 | return safexcel_ahash_finup(areq); | |
1329 | } | |
1330 | ||
1331 | struct safexcel_alg_template safexcel_alg_sha512 = { | |
1332 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1333 | .engines = EIP97IES | EIP197B | EIP197D, |
b460edb6 AT |
1334 | .alg.ahash = { |
1335 | .init = safexcel_sha512_init, | |
1336 | .update = safexcel_ahash_update, | |
1337 | .final = safexcel_ahash_final, | |
1338 | .finup = safexcel_ahash_finup, | |
1339 | .digest = safexcel_sha512_digest, | |
1340 | .export = safexcel_ahash_export, | |
1341 | .import = safexcel_ahash_import, | |
1342 | .halg = { | |
1343 | .digestsize = SHA512_DIGEST_SIZE, | |
1344 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1345 | .base = { | |
1346 | .cra_name = "sha512", | |
1347 | .cra_driver_name = "safexcel-sha512", | |
1348 | .cra_priority = 300, | |
1349 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1350 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1351 | .cra_blocksize = SHA512_BLOCK_SIZE, | |
1352 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1353 | .cra_init = safexcel_ahash_cra_init, | |
1354 | .cra_exit = safexcel_ahash_cra_exit, | |
1355 | .cra_module = THIS_MODULE, | |
1356 | }, | |
1357 | }, | |
1358 | }, | |
1359 | }; | |
0de54fb1 | 1360 | |
9e46eafd AT |
1361 | static int safexcel_sha384_init(struct ahash_request *areq) |
1362 | { | |
1363 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1364 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1365 | ||
1366 | memset(req, 0, sizeof(*req)); | |
1367 | ||
1368 | req->state[0] = lower_32_bits(SHA384_H0); | |
1369 | req->state[1] = upper_32_bits(SHA384_H0); | |
1370 | req->state[2] = lower_32_bits(SHA384_H1); | |
1371 | req->state[3] = upper_32_bits(SHA384_H1); | |
1372 | req->state[4] = lower_32_bits(SHA384_H2); | |
1373 | req->state[5] = upper_32_bits(SHA384_H2); | |
1374 | req->state[6] = lower_32_bits(SHA384_H3); | |
1375 | req->state[7] = upper_32_bits(SHA384_H3); | |
1376 | req->state[8] = lower_32_bits(SHA384_H4); | |
1377 | req->state[9] = upper_32_bits(SHA384_H4); | |
1378 | req->state[10] = lower_32_bits(SHA384_H5); | |
1379 | req->state[11] = upper_32_bits(SHA384_H5); | |
1380 | req->state[12] = lower_32_bits(SHA384_H6); | |
1381 | req->state[13] = upper_32_bits(SHA384_H6); | |
1382 | req->state[14] = lower_32_bits(SHA384_H7); | |
1383 | req->state[15] = upper_32_bits(SHA384_H7); | |
1384 | ||
1385 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; | |
1386 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1387 | req->state_sz = SHA512_DIGEST_SIZE; | |
1388 | ||
1389 | return 0; | |
1390 | } | |
1391 | ||
1392 | static int safexcel_sha384_digest(struct ahash_request *areq) | |
1393 | { | |
1394 | int ret = safexcel_sha384_init(areq); | |
1395 | ||
1396 | if (ret) | |
1397 | return ret; | |
1398 | ||
1399 | return safexcel_ahash_finup(areq); | |
1400 | } | |
1401 | ||
1402 | struct safexcel_alg_template safexcel_alg_sha384 = { | |
1403 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1404 | .engines = EIP97IES | EIP197B | EIP197D, |
9e46eafd AT |
1405 | .alg.ahash = { |
1406 | .init = safexcel_sha384_init, | |
1407 | .update = safexcel_ahash_update, | |
1408 | .final = safexcel_ahash_final, | |
1409 | .finup = safexcel_ahash_finup, | |
1410 | .digest = safexcel_sha384_digest, | |
1411 | .export = safexcel_ahash_export, | |
1412 | .import = safexcel_ahash_import, | |
1413 | .halg = { | |
1414 | .digestsize = SHA384_DIGEST_SIZE, | |
1415 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1416 | .base = { | |
1417 | .cra_name = "sha384", | |
1418 | .cra_driver_name = "safexcel-sha384", | |
1419 | .cra_priority = 300, | |
1420 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1421 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1422 | .cra_blocksize = SHA384_BLOCK_SIZE, | |
1423 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1424 | .cra_init = safexcel_ahash_cra_init, | |
1425 | .cra_exit = safexcel_ahash_cra_exit, | |
1426 | .cra_module = THIS_MODULE, | |
1427 | }, | |
1428 | }, | |
1429 | }, | |
1430 | }; | |
1431 | ||
0de54fb1 AT |
1432 | static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, |
1433 | unsigned int keylen) | |
1434 | { | |
1435 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512", | |
1436 | SHA512_DIGEST_SIZE); | |
1437 | } | |
1438 | ||
1439 | static int safexcel_hmac_sha512_init(struct ahash_request *areq) | |
1440 | { | |
1441 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1442 | ||
1443 | safexcel_sha512_init(areq); | |
1444 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1445 | return 0; | |
1446 | } | |
1447 | ||
1448 | static int safexcel_hmac_sha512_digest(struct ahash_request *areq) | |
1449 | { | |
1450 | int ret = safexcel_hmac_sha512_init(areq); | |
1451 | ||
1452 | if (ret) | |
1453 | return ret; | |
1454 | ||
1455 | return safexcel_ahash_finup(areq); | |
1456 | } | |
1457 | ||
1458 | struct safexcel_alg_template safexcel_alg_hmac_sha512 = { | |
1459 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1460 | .engines = EIP97IES | EIP197B | EIP197D, |
0de54fb1 AT |
1461 | .alg.ahash = { |
1462 | .init = safexcel_hmac_sha512_init, | |
1463 | .update = safexcel_ahash_update, | |
1464 | .final = safexcel_ahash_final, | |
1465 | .finup = safexcel_ahash_finup, | |
1466 | .digest = safexcel_hmac_sha512_digest, | |
1467 | .setkey = safexcel_hmac_sha512_setkey, | |
1468 | .export = safexcel_ahash_export, | |
1469 | .import = safexcel_ahash_import, | |
1470 | .halg = { | |
1471 | .digestsize = SHA512_DIGEST_SIZE, | |
1472 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1473 | .base = { | |
1474 | .cra_name = "hmac(sha512)", | |
1475 | .cra_driver_name = "safexcel-hmac-sha512", | |
1476 | .cra_priority = 300, | |
1477 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1478 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1479 | .cra_blocksize = SHA512_BLOCK_SIZE, | |
1480 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1481 | .cra_init = safexcel_ahash_cra_init, | |
1482 | .cra_exit = safexcel_ahash_cra_exit, | |
1483 | .cra_module = THIS_MODULE, | |
1484 | }, | |
1485 | }, | |
1486 | }, | |
1487 | }; | |
1f5d5d98 AT |
1488 | |
1489 | static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1490 | unsigned int keylen) | |
1491 | { | |
1492 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384", | |
1493 | SHA512_DIGEST_SIZE); | |
1494 | } | |
1495 | ||
1496 | static int safexcel_hmac_sha384_init(struct ahash_request *areq) | |
1497 | { | |
1498 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1499 | ||
1500 | safexcel_sha384_init(areq); | |
1501 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1502 | return 0; | |
1503 | } | |
1504 | ||
1505 | static int safexcel_hmac_sha384_digest(struct ahash_request *areq) | |
1506 | { | |
1507 | int ret = safexcel_hmac_sha384_init(areq); | |
1508 | ||
1509 | if (ret) | |
1510 | return ret; | |
1511 | ||
1512 | return safexcel_ahash_finup(areq); | |
1513 | } | |
1514 | ||
1515 | struct safexcel_alg_template safexcel_alg_hmac_sha384 = { | |
1516 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
5eb09118 | 1517 | .engines = EIP97IES | EIP197B | EIP197D, |
1f5d5d98 AT |
1518 | .alg.ahash = { |
1519 | .init = safexcel_hmac_sha384_init, | |
1520 | .update = safexcel_ahash_update, | |
1521 | .final = safexcel_ahash_final, | |
1522 | .finup = safexcel_ahash_finup, | |
1523 | .digest = safexcel_hmac_sha384_digest, | |
1524 | .setkey = safexcel_hmac_sha384_setkey, | |
1525 | .export = safexcel_ahash_export, | |
1526 | .import = safexcel_ahash_import, | |
1527 | .halg = { | |
1528 | .digestsize = SHA384_DIGEST_SIZE, | |
1529 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1530 | .base = { | |
1531 | .cra_name = "hmac(sha384)", | |
1532 | .cra_driver_name = "safexcel-hmac-sha384", | |
1533 | .cra_priority = 300, | |
1534 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1535 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1536 | .cra_blocksize = SHA384_BLOCK_SIZE, | |
1537 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1538 | .cra_init = safexcel_ahash_cra_init, | |
1539 | .cra_exit = safexcel_ahash_cra_exit, | |
1540 | .cra_module = THIS_MODULE, | |
1541 | }, | |
1542 | }, | |
1543 | }, | |
1544 | }; | |
293f89cf OH |
1545 | |
1546 | static int safexcel_md5_init(struct ahash_request *areq) | |
1547 | { | |
1548 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | |
1549 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1550 | ||
1551 | memset(req, 0, sizeof(*req)); | |
1552 | ||
1553 | req->state[0] = MD5_H0; | |
1554 | req->state[1] = MD5_H1; | |
1555 | req->state[2] = MD5_H2; | |
1556 | req->state[3] = MD5_H3; | |
1557 | ||
1558 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; | |
1559 | req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | |
1560 | req->state_sz = MD5_DIGEST_SIZE; | |
1561 | ||
1562 | return 0; | |
1563 | } | |
1564 | ||
1565 | static int safexcel_md5_digest(struct ahash_request *areq) | |
1566 | { | |
1567 | int ret = safexcel_md5_init(areq); | |
1568 | ||
1569 | if (ret) | |
1570 | return ret; | |
1571 | ||
1572 | return safexcel_ahash_finup(areq); | |
1573 | } | |
1574 | ||
1575 | struct safexcel_alg_template safexcel_alg_md5 = { | |
1576 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
1577 | .engines = EIP97IES | EIP197B | EIP197D, | |
1578 | .alg.ahash = { | |
1579 | .init = safexcel_md5_init, | |
1580 | .update = safexcel_ahash_update, | |
1581 | .final = safexcel_ahash_final, | |
1582 | .finup = safexcel_ahash_finup, | |
1583 | .digest = safexcel_md5_digest, | |
1584 | .export = safexcel_ahash_export, | |
1585 | .import = safexcel_ahash_import, | |
1586 | .halg = { | |
1587 | .digestsize = MD5_DIGEST_SIZE, | |
1588 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1589 | .base = { | |
1590 | .cra_name = "md5", | |
1591 | .cra_driver_name = "safexcel-md5", | |
1592 | .cra_priority = 300, | |
1593 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1594 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1595 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | |
1596 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1597 | .cra_init = safexcel_ahash_cra_init, | |
1598 | .cra_exit = safexcel_ahash_cra_exit, | |
1599 | .cra_module = THIS_MODULE, | |
1600 | }, | |
1601 | }, | |
1602 | }, | |
1603 | }; | |
b471e4b9 OH |
1604 | |
1605 | static int safexcel_hmac_md5_init(struct ahash_request *areq) | |
1606 | { | |
1607 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | |
1608 | ||
1609 | safexcel_md5_init(areq); | |
1610 | req->digest = CONTEXT_CONTROL_DIGEST_HMAC; | |
1611 | return 0; | |
1612 | } | |
1613 | ||
1614 | static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1615 | unsigned int keylen) | |
1616 | { | |
1617 | return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5", | |
1618 | MD5_DIGEST_SIZE); | |
1619 | } | |
1620 | ||
1621 | static int safexcel_hmac_md5_digest(struct ahash_request *areq) | |
1622 | { | |
1623 | int ret = safexcel_hmac_md5_init(areq); | |
1624 | ||
1625 | if (ret) | |
1626 | return ret; | |
1627 | ||
1628 | return safexcel_ahash_finup(areq); | |
1629 | } | |
1630 | ||
1631 | struct safexcel_alg_template safexcel_alg_hmac_md5 = { | |
1632 | .type = SAFEXCEL_ALG_TYPE_AHASH, | |
1633 | .engines = EIP97IES | EIP197B | EIP197D, | |
1634 | .alg.ahash = { | |
1635 | .init = safexcel_hmac_md5_init, | |
1636 | .update = safexcel_ahash_update, | |
1637 | .final = safexcel_ahash_final, | |
1638 | .finup = safexcel_ahash_finup, | |
1639 | .digest = safexcel_hmac_md5_digest, | |
1640 | .setkey = safexcel_hmac_md5_setkey, | |
1641 | .export = safexcel_ahash_export, | |
1642 | .import = safexcel_ahash_import, | |
1643 | .halg = { | |
1644 | .digestsize = MD5_DIGEST_SIZE, | |
1645 | .statesize = sizeof(struct safexcel_ahash_export_state), | |
1646 | .base = { | |
1647 | .cra_name = "hmac(md5)", | |
1648 | .cra_driver_name = "safexcel-hmac-md5", | |
1649 | .cra_priority = 300, | |
1650 | .cra_flags = CRYPTO_ALG_ASYNC | | |
1651 | CRYPTO_ALG_KERN_DRIVER_ONLY, | |
1652 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | |
1653 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | |
1654 | .cra_init = safexcel_ahash_cra_init, | |
1655 | .cra_exit = safexcel_ahash_cra_exit, | |
1656 | .cra_module = THIS_MODULE, | |
1657 | }, | |
1658 | }, | |
1659 | }, | |
1660 | }; |