/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/hash.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_hash.h"
#include "ssi_sram_mgr.h"

#define SSI_MAX_AHASH_SEQ_LEN 12
#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)

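/*
 * Note: opad_tmp_keys_buff doubles as storage for either a block-sized
 * HMAC key (hashed or zero-padded) or, for XCBC-MAC, the three derived
 * AES keys K1/K2/K3; hence the MAX() of one hash block and three AES
 * blocks in SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE above.
 */
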
struct ssi_hash_handle {
	ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
	struct completion init_comp;
};

static const u32 digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
#if (DX_DEV_SHA_MAX > 256)
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static const u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif

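/*
 * The larval (initial) digest words above are listed high-to-low
 * (e.g. H7..H0), presumably matching the layout of the larval digests
 * in the CryptoCell SRAM (see the BYPASS copy in ssi_hash_map_request()).
 */
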
static void ssi_hash_create_xcbc_setup(
	struct ahash_request *areq,
	struct cc_hw_desc desc[],
	unsigned int *seq_size);

static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size);

struct ssi_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct ssi_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
};

/* hash per-session context */
struct ssi_hash_ctx {
	struct ssi_drvdata *drvdata;
	/* holds the origin digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct completion setkey_comp;
	bool is_hmac;
};

static void ssi_hash_create_data_desc(
	struct ahash_req_ctx *areq_ctx,
	struct ssi_hash_ctx *ctx,
	unsigned int flow_mode, struct cc_hw_desc desc[],
	bool is_not_last_data,
	unsigned int *seq_size);

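/*
 * MD5 (a little-endian digest) and the 64-bit-word SHA-384/512 modes
 * need an explicit byte swap of the result; the remaining modes only
 * set the little-endian result configuration. (Rationale inferred from
 * the mode split below, not from hardware documentation.)
 */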
static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (unlikely((mode == DRV_HASH_MD5) ||
		     (mode == DRV_HASH_SHA384) ||
		     (mode == DRV_HASH_SHA512))) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

static int ssi_hash_map_result(struct device *dev,
			       struct ahash_req_ctx *state,
			       unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, (void *)state->digest_result_buff,
			       digestsize,
			       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

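/*
 * Allocate and DMA-map the per-request intermediate buffers (buff0/buff1
 * staging buffers, digest, digest length, opad digest). For HMAC the
 * ipad digest precomputed at setkey time is copied in from the tfm
 * context; for a plain hash the larval digest is instead copied from
 * SRAM via a BYPASS descriptor.
 */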
static int ssi_hash_map_request(struct device *dev,
				struct ahash_req_ctx *state,
				struct ssi_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc;
	int rc = -ENOMEM;

	state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff0)
		goto fail0;

	state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->buff1)
		goto fail_buff0;

	state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
	if (!state->digest_result_buff)
		goto fail_buff1;

	state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
	if (!state->digest_buff)
		goto fail_digest_result_buff;

	dev_dbg(dev, "Allocated digest-buffer in context state->digest_buff=@%p\n",
		state->digest_buff);
	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
		if (!state->digest_bytes_len)
			goto fail1;

		dev_dbg(dev, "Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n",
			state->digest_bytes_len);
	} else {
		state->digest_bytes_len = NULL;
	}

	state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
	if (!state->opad_digest_buff)
		goto fail2;

	dev_dbg(dev, "Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n",
		state->opad_digest_buff);

	state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		goto fail3;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (is_hmac) {
		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
			memset(state->digest_buff, 0, ctx->inter_digestsize);
		} else { /*sha*/
			memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
			if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
				memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
			else
				memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#else
			memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
		}
		dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. The SRAM contains the
		 * initial digests in the expected order for all SHA*
		 */
		hw_desc_init(&desc);
		set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
		set_dout_dlli(&desc, state->digest_buff_dma_addr,
			      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc, BYPASS);

		rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
		if (unlikely(rc != 0)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto fail4;
		}
	}

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_LEN_SIZE, state->digest_bytes_len);
			goto fail4;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	} else {
		state->digest_bytes_len_dma_addr = 0;
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto fail5;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	} else {
		state->opad_digest_dma_addr = 0;
	}
	state->buff0_cnt = 0;
	state->buff1_cnt = 0;
	state->buff_index = 0;
	state->mlli_params.curr_pool = NULL;

	return 0;

fail5:
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
fail4:
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}
fail3:
	kfree(state->opad_digest_buff);
fail2:
	kfree(state->digest_bytes_len);
fail1:
	kfree(state->digest_buff);
fail_digest_result_buff:
	kfree(state->digest_result_buff);
	state->digest_result_buff = NULL;
fail_buff1:
	kfree(state->buff1);
	state->buff1 = NULL;
fail_buff0:
	kfree(state->buff0);
	state->buff0 = NULL;
fail0:
	return rc;
}

static void ssi_hash_unmap_request(struct device *dev,
				   struct ahash_req_ctx *state,
				   struct ssi_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr != 0) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr != 0) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}

	kfree(state->opad_digest_buff);
	kfree(state->digest_bytes_len);
	kfree(state->digest_buff);
	kfree(state->digest_result_buff);
	kfree(state->buff1);
	kfree(state->buff0);
}

static void ssi_hash_unmap_result(struct device *dev,
				  struct ahash_req_ctx *state,
				  unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr != 0) {
		dma_unmap_single(dev,
				 state->digest_result_dma_addr,
				 digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result,
		       state->digest_result_buff,
		       digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}

static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ahash_request *req = (struct ahash_request *)ssi_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
	ssi_hash_unmap_result(dev, state, digestsize, req->result);
	ssi_hash_unmap_request(dev, state, ctx);
	req->base.complete(&req->base, 0);
}

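/*
 * One-shot digest: load the initial state (ipad digest for HMAC, larval
 * digest otherwise) and the running length, hash the data, and for HMAC
 * re-run the engine over the inner digest with the opad state loaded to
 * produce the outer hash.
 */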
static int ssi_hash_digest(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int digestsize,
			   struct scatterlist *src,
			   unsigned int nbytes, u8 *result,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_digest_complete;
		ssi_req.user_arg = (void *)async_req;
	}

	/* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
			     NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		if (likely(nbytes != 0))
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      HASH_LEN_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		/* store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash opad xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
		ssi_hash_unmap_result(dev, state, digestsize, result);
		ssi_hash_unmap_request(dev, state, ctx);
	}
	return rc;
}

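/*
 * update: restore the intermediate digest and byte count from the
 * request context, hash the new data, then write both back so a later
 * update/final/finup can resume from this point.
 */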
static int ssi_hash_update(struct ahash_req_ctx *state,
			   struct ssi_hash_ctx *ctx,
			   unsigned int block_size,
			   struct scatterlist *src,
			   unsigned int nbytes,
			   void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	u32 idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
	if (unlikely(rc)) {
		if (rc == 1) {
			dev_dbg(dev, "data size 0x%x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_update_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;
	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
		}
	}
	return rc;
}

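/*
 * finup: hash the remaining data and produce the final digest in a
 * single descriptor sequence; hardware padding stays enabled when the
 * running length is restored (contrast ssi_hash_final() below, which
 * pads explicitly via DO_PAD).
 */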
static int ssi_hash_finup(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes,
			  u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}

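/*
 * final: like finup, but the running length is restored with hardware
 * padding disabled and the last block is padded explicitly via a DO_PAD
 * descriptor that also writes the updated length back to the context.
 */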
static int ssi_hash_final(struct ahash_req_ctx *state,
			  struct ssi_hash_ctx *ctx,
			  unsigned int digestsize,
			  struct scatterlist *src,
			  unsigned int nbytes,
			  u8 *result,
			  void *async_req)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		return -ENOMEM;
	}

	if (async_req) {
		/* Setup DX request structure */
		ssi_req.user_cb = (void *)ssi_hash_complete;
		ssi_req.user_arg = async_req;
	}

	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     HASH_LEN_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);

	/* "DO-PAD" must be enabled only when writing current length to HW */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      HASH_LEN_SIZE, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac) {
		/* Store the hash digest result in the context */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      digestsize, NS_BIT, 0);
		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;

		/* Loading hash OPAD xor key state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_initial_digest_len_sram_addr(
			     ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;

		/* Perform HASH update on last digest */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     digestsize, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, (async_req ? 1 : 0));
	if (async_req)
		set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	if (async_req) {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
		if (unlikely(rc != -EINPROGRESS)) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		}
	} else {
		rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
		if (rc != 0) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
			ssi_hash_unmap_result(dev, state, digestsize, result);
		} else {
			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
			ssi_hash_unmap_result(dev, state, digestsize, result);
			ssi_hash_unmap_request(dev, state, ctx);
		}
	}
	return rc;
}

static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	state->xcbc_count = 0;

	ssi_hash_map_request(dev, state, ctx);

	return 0;
}

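/*
 * HMAC setkey: a key longer than the block size is hashed first, a
 * shorter one is zero-padded to a full block. The padded block is then
 * XORed with the ipad/opad constants and run through the hash engine
 * once per constant, storing the ipad digest in digest_buff and the
 * opad digest in opad_tmp_keys_buff for later requests.
 */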
static int ssi_hash_setkey(void *hash,
			   const u8 *key,
			   unsigned int keylen,
			   bool synchronize)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	ssi_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d\n", keylen);

	blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
	digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));

	larval_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->hash_mode);

	/* A zero keylen means plain HASH; any non-zero keylen selects
	 * the HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;

	if (keylen != 0) {
		ctx->key_params.key_dma_addr = dma_map_single(
						dev, (void *)key,
						keylen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       ctx->key_params.key_dma_addr))) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
						   digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

out:
	if (rc)
		crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}
	return rc;
}

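/*
 * XCBC-MAC setkey: derive the three subkeys by AES-ECB encrypting the
 * constants 0x01..01, 0x02..02 and 0x03..03 under the user key and
 * store them at the K1/K2/K3 offsets of opad_tmp_keys_buff (the
 * standard AES-XCBC derivation, cf. RFC 3566).
 */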
static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
			   const u8 *key, unsigned int keylen)
{
	struct ssi_crypto_req ssi_req = {};
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int idx = 0, rc = 0;
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key_dma_addr = dma_map_single(
					dev, (void *)key,
					keylen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
				   XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);

	if (rc != 0)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	return rc;
}

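/*
 * CMAC setkey: only the raw AES key is copied into the context (192-bit
 * keys are zero-padded up to the maximum AES key size). No derivation
 * descriptors are issued here, so the hardware presumably derives the
 * CMAC subkeys internally.
 */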
#if SSI_CC_HAS_CMAC
static int ssi_cmac_setkey(struct crypto_ahash *ahash,
			   const u8 *key, unsigned int keylen)
{
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24)
		memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}
#endif

static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr != 0) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr != 0) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	ssi_hash_free_ctx(ctx);
	return -ENOMEM;
}

static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct ssi_hash_alg *ssi_alg =
		container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = ssi_alg->hash_mode;
	ctx->hw_mode = ssi_alg->hw_mode;
	ctx->inter_digestsize = ssi_alg->inter_digestsize;
	ctx->drvdata = ssi_alg->drvdata;

	return ssi_hash_alloc_ctx(ctx);
}

static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "ssi_hash_cra_exit\n");
	ssi_hash_free_ctx(ctx);
}

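/*
 * MAC update: run the newly supplied data through the AES engine in
 * XCBC/CMAC mode and write the intermediate MAC state back into the
 * request context. A return of 1 from the buffer manager means the data
 * was merely buffered and no hardware pass is needed this round.
 */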
static int ssi_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int rc;
	u32 idx = 0;

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
	if (unlikely(rc)) {
		if (rc == 1) {
			dev_dbg(dev, "data size 0x%x does not require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	else
		ssi_hash_create_cmac_setup(req, desc, &idx);

	ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_update_complete;
	ssi_req.user_arg = (void *)req;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
	}
	return rc;
}

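/*
 * MAC final handles three cases: no data at all (a special zero-size
 * CMAC descriptor), a buffered sub-block remainder (a normal data
 * descriptor), and a block-aligned message, where the stored MAC state
 * is first ECB-decrypted with K1 to recover previous-state XOR M[n],
 * apparently so the last block can be replayed with padding applied.
 */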
static int ssi_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);

	u32 rem_cnt = state->buff_index ? state->buff1_cnt :
			state->buff0_cnt;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}

	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* Undo the request mapping done above before bailing out. */
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_complete;
	ssi_req.user_arg = (void *)req;

	if (state->xcbc_count && (rem_cnt == 0)) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr +
			      XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous block_state-XOR-M[n] */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	else
		ssi_hash_create_cmac_setup(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
	}
	return rc;
}

static int ssi_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to ssi_mac_final\n");
		return ssi_mac_final(req);
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* Undo the request mapping done above before bailing out. */
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_complete;
	ssi_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		ssi_hash_create_cmac_setup(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
	}
	return rc;
}

static int ssi_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct ssi_crypto_req ssi_req = {};
	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
	u32 key_len;
	int idx = 0;
	int rc;

	dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);

	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		/* Undo the request-state mapping done above. */
		ssi_hash_unmap_request(dev, state, ctx);
		return -ENOMEM;
	}

	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		/* Undo the mappings done above before bailing out. */
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
		ssi_hash_unmap_request(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_hash_digest_complete;
	ssi_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		ssi_hash_create_xcbc_setup(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		ssi_hash_create_cmac_setup(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
	if (unlikely(rc != -EINPROGRESS)) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
		ssi_hash_unmap_result(dev, state, digestsize, req->result);
		ssi_hash_unmap_request(dev, state, ctx);
	}
	return rc;
}

/* ahash wrapper functions */
static int ssi_ahash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);

	return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
}

static int ssi_ahash_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
}

static int ssi_ahash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	return ssi_hash_init(state, ctx);
}
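
/*
 * Exported/imported partial-hash state layout (see CC_STATE_SIZE below):
 *
 *	u32 magic (CC_EXPORT_MAGIC)
 *	u8  digest[inter_digestsize]	- intermediate digest
 *	u8  len[HASH_LEN_SIZE]		- hashed-bytes counter (0x5F-poisoned
 *					  for MAC modes that do not use it)
 *	u32 count			- bytes pending in the staging buffer
 *	u8  buff[count]			- partial input block
 */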
static int ssi_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
	u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
			state->buff0_cnt;
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	if (state->digest_bytes_len_dma_addr) {
		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
	} else {
		/* Poison the unused exported digest len field. */
		memset(out, 0x5F, HASH_LEN_SIZE);
	}
	out += HASH_LEN_SIZE;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	/* No sync for the device is needed since we did not change the
	 * data; we only copied it.
	 */

	return 0;
}

static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	u32 tmp;
	int rc = 0;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC) {
		rc = -EINVAL;
		goto out;
	}
	in += sizeof(u32);

	/* call init() to allocate bufs if the user hasn't */
	if (!state->digest_buff) {
		rc = ssi_hash_init(state, ctx);
		if (rc)
			goto out;
	}

	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	if (state->digest_bytes_len_dma_addr) {
		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
		memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
	}
	in += HASH_LEN_SIZE;

	dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
				   ctx->inter_digestsize, DMA_BIDIRECTIONAL);

	if (state->digest_bytes_len_dma_addr)
		dma_sync_single_for_device(dev,
					   state->digest_bytes_len_dma_addr,
					   HASH_LEN_SIZE, DMA_BIDIRECTIONAL);

	state->buff_index = 0;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}
	in += sizeof(u32);

	state->buff0_cnt = tmp;
	memcpy(state->buff0, in, state->buff0_cnt);

out:
	return rc;
}
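
/*
 * Illustrative sketch (not part of this driver): how a crypto API user
 * would exercise the export/import callbacks above. Error and async
 * completion handling are omitted, and "sha256" is just an example
 * transform name.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *	ahash_request_set_crypt(req, sgl, result, len);
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);
 *	crypto_ahash_export(req, state);	// -> ssi_ahash_export()
 *	...
 *	crypto_ahash_import(req, state);	// -> ssi_ahash_import()
 *	crypto_ahash_final(req);
 */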

static int ssi_ahash_setkey(struct crypto_ahash *ahash,
			    const u8 *key, unsigned int keylen)
{
	return ssi_hash_setkey((void *)ahash, key, keylen, false);
}

struct ssi_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct ssi_drvdata *drvdata;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
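
/*
 * CC_STATE_SIZE() must stay in sync with the layout serialized by
 * ssi_ahash_export(): magic word + intermediate digest (_x) + length
 * field + buffer count + one partial block of staged input.
 */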

/* hash descriptors */
static struct ssi_hash_template driver_hash[] = {
	/* Asynchronous hash templates */
	{
		.name = "sha1",
		.driver_name = "sha1-dx",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-dx",
		.blocksize = SHA1_BLOCK_SIZE,
		.synchronize = false,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-dx",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-dx",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-dx",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-dx",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
	},
#if (DX_DEV_SHA_MAX > 256)
	{
		.name = "sha384",
		.driver_name = "sha384-dx",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-dx",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-dx",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-dx",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
	},
#endif
	{
		.name = "md5",
		.driver_name = "md5-dx",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-dx",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_ahash_update,
			.final = ssi_ahash_final,
			.finup = ssi_ahash_finup,
			.digest = ssi_ahash_digest,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.setkey = ssi_ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
	},
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_mac_update,
			.final = ssi_mac_final,
			.finup = ssi_mac_finup,
			.digest = ssi_mac_digest,
			.setkey = ssi_xcbc_setkey,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
	},
#if SSI_CC_HAS_CMAC
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ssi_ahash_init,
			.update = ssi_mac_update,
			.final = ssi_mac_final,
			.finup = ssi_mac_finup,
			.digest = ssi_mac_digest,
			.setkey = ssi_cmac_setkey,
			.export = ssi_ahash_export,
			.import = ssi_ahash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
	},
#endif
};
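
/*
 * Note: ssi_hash_alloc() registers the keyed (mac_name) variant of every
 * template above, plus the plain hash variant. The xcbc(aes)/cmac(aes)
 * templates are MAC-only, so only their keyed form is registered.
 */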

static struct ssi_hash_alg *
ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
		    bool keyed)
{
	struct ssi_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
	alg->cra_priority = SSI_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = ssi_hash_cra_exit;

	alg->cra_init = ssi_ahash_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
			CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->cra_type = &crypto_ahash_type;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}
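
/*
 * SRAM layout populated below, starting at digest_len_sram_addr:
 *
 *	digest_len_init | [digest_len_sha512_init] |
 *	md5_init | sha1_init | sha224_init | sha256_init |
 *	[sha384_init | sha512_init]
 *
 * (bracketed entries only when DX_DEV_SHA_MAX > 256). The offset
 * arithmetic in ssi_ahash_get_larval_digest_sram_addr() must mirror
 * this copy order.
 */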
int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
	ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
	unsigned int larval_seq_len = 0;
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
#if (DX_DEV_SHA_MAX > 256)
	int i;
#endif

	/* Copy-to-sram digest-len */
	ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
				     ARRAY_SIZE(digest_len_init),
				     larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_init);
	larval_seq_len = 0;

#if (DX_DEV_SHA_MAX > 256)
	/* Copy-to-sram digest-len for sha384/512 */
	ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
				     ARRAY_SIZE(digest_len_sha512_init),
				     larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;

	sram_buff_ofs += sizeof(digest_len_sha512_init);
	larval_seq_len = 0;
#endif

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
				     ARRAY_SIZE(md5_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(md5_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
				     ARRAY_SIZE(sha1_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha1_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
				     ARRAY_SIZE(sha224_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha224_init);
	larval_seq_len = 0;

	ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
				     ARRAY_SIZE(sha256_init), larval_seq,
				     &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0))
		goto init_digest_const_err;
	sram_buff_ofs += sizeof(sha256_init);
	larval_seq_len = 0;

#if (DX_DEV_SHA_MAX > 256)
	/* The SHA-384/512 larval digests are u64 arrays; swap the two
	 * 32-bit halves of each double-word before copying to SRAM.
	 */
	for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
		const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
		const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
	}
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;
	}
	larval_seq_len = 0;

	for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
		const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
		const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];

		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
	}
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (unlikely(rc != 0)) {
		dev_err(dev, "send_request() failed (rc = %d)\n", rc);
		goto init_digest_const_err;
	}
#endif

init_digest_const_err:
	return rc;
}

int ssi_hash_alloc(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_handle *hash_handle;
	ssi_sram_addr_t sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(digest_len_init) +
#if (DX_DEV_SHA_MAX > 256)
			sizeof(digest_len_sha512_init) +
			sizeof(sha384_init) +
			sizeof(sha512_init) +
#endif
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init);

	sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration, as it is used there */
	rc = ssi_hash_init_sram_digest_consts(drvdata);
	if (unlikely(rc != 0)) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct ssi_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* register hmac version */
		t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry,
				      &hash_handle->hash_list);
		}

		if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
		    (hw_mode == DRV_CIPHER_CMAC))
			continue;

		/* register hash version */
		t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		} else {
			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
	}

	return 0;

fail:
	kfree(drvdata->hash_handle);
	drvdata->hash_handle = NULL;
	return rc;
}

int ssi_hash_free(struct ssi_drvdata *drvdata)
{
	struct ssi_hash_alg *t_hash_alg, *hash_n;
	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;

	if (hash_handle) {
		list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}

		kfree(hash_handle);
		drvdata->hash_handle = NULL;
	}
	return 0;
}
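
/*
 * XCBC-MAC (RFC 3566) derives three subkeys from the AES key: K1 for the
 * running CBC-MAC, K2/K3 for the final complete/padded block. The setup
 * sequence below loads K1 into the AES key slot (KEY0) and K2/K3 into the
 * auxiliary state slots (STATE1/STATE2), then restores the intermediate
 * MAC state (STATE0).
 */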
static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}
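
/*
 * CMAC needs no software-derived subkeys here; the setup below loads
 * only the raw AES key and the MAC state, leaving subkey handling to
 * the engine. Note that a 192-bit key is read from a full
 * AES_MAX_KEY_SIZE slot in the opad_tmp_keys buffer, while the
 * programmed key length remains the real keylen.
 */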
static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
				       struct cc_hw_desc desc[],
				       unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}
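
/*
 * Data descriptors: for a single contiguous DMA buffer (DLLI) the input
 * is fed to the engine directly; for scatter-gather input the MLLI
 * (link-list) table is first copied into SRAM via the BYPASS flow and
 * the engine then reads the data through it.
 */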
static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
				      struct ssi_hash_ctx *ctx,
				      unsigned int flow_mode,
				      struct cc_hw_desc desc[],
				      bool is_not_last_data,
				      unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;
		/* process */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}

/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
 *             (and SHA384/SHA512 when DX_DEV_SHA_MAX > 256)
 *
 * \return ssi_sram_addr_t The address of the initial digest in SRAM
 */
ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
{
	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);

	switch (mode) {
	case DRV_HASH_NULL:
		break; /*Ignore*/
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init));
#if (DX_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init));
	case DRV_HASH_SHA512:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init) +
			sizeof(sha384_init));
#endif
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* Return a valid (if incorrect) address to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}

ssi_sram_addr_t
ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
{
	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
	ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
#if (DX_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(digest_len_init);
#endif
	default:
		return digest_len_addr; /*to avoid kernel crash*/
	}
}