/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/hash.h>
22 #include <crypto/sha.h>
23 #include <crypto/md5.h>
24 #include <crypto/internal/hash.h>
26 #include "ssi_config.h"
27 #include "ssi_driver.h"
28 #include "ssi_request_mgr.h"
29 #include "ssi_buffer_mgr.h"
30 #include "ssi_sysfs.h"
32 #include "ssi_sram_mgr.h"
34 #define SSI_MAX_AHASH_SEQ_LEN 12
35 #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
37 struct ssi_hash_handle
{
38 ssi_sram_addr_t digest_len_sram_addr
; /* const value in SRAM*/
39 ssi_sram_addr_t larval_digest_sram_addr
; /* const value in SRAM */
40 struct list_head hash_list
;
41 struct completion init_comp
;
44 static const u32 digest_len_init
[] = {
45 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
46 static const u32 md5_init
[] = {
47 SHA1_H3
, SHA1_H2
, SHA1_H1
, SHA1_H0
};
48 static const u32 sha1_init
[] = {
49 SHA1_H4
, SHA1_H3
, SHA1_H2
, SHA1_H1
, SHA1_H0
};
50 static const u32 sha224_init
[] = {
51 SHA224_H7
, SHA224_H6
, SHA224_H5
, SHA224_H4
,
52 SHA224_H3
, SHA224_H2
, SHA224_H1
, SHA224_H0
};
53 static const u32 sha256_init
[] = {
54 SHA256_H7
, SHA256_H6
, SHA256_H5
, SHA256_H4
,
55 SHA256_H3
, SHA256_H2
, SHA256_H1
, SHA256_H0
};
#if (DX_DEV_SHA_MAX > 256)
/* Initial counter image for SHA-384/512 (128-byte block size). */
static const u32 digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
/* Larval digests in reversed word order, as expected by the hardware. */
static const u64 sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static const u64 sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
67 static void ssi_hash_create_xcbc_setup(
68 struct ahash_request
*areq
,
69 struct cc_hw_desc desc
[],
70 unsigned int *seq_size
);
72 static void ssi_hash_create_cmac_setup(struct ahash_request
*areq
,
73 struct cc_hw_desc desc
[],
74 unsigned int *seq_size
);
77 struct list_head entry
;
81 struct ssi_drvdata
*drvdata
;
82 struct ahash_alg ahash_alg
;
85 struct hash_key_req_ctx
{
87 dma_addr_t key_dma_addr
;
90 /* hash per-session context */
92 struct ssi_drvdata
*drvdata
;
93 /* holds the origin digest; the digest after "setkey" if HMAC,*
94 * the initial digest if HASH.
96 u8 digest_buff
[SSI_MAX_HASH_DIGEST_SIZE
] ____cacheline_aligned
;
97 u8 opad_tmp_keys_buff
[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE
] ____cacheline_aligned
;
99 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned
;
100 dma_addr_t digest_buff_dma_addr
;
101 /* use for hmac with key large then mode block size */
102 struct hash_key_req_ctx key_params
;
105 int inter_digestsize
;
106 struct completion setkey_comp
;
110 static void ssi_hash_create_data_desc(
111 struct ahash_req_ctx
*areq_ctx
,
112 struct ssi_hash_ctx
*ctx
,
113 unsigned int flow_mode
, struct cc_hw_desc desc
[],
114 bool is_not_last_data
,
115 unsigned int *seq_size
);
117 static inline void ssi_set_hash_endianity(u32 mode
, struct cc_hw_desc
*desc
)
119 if (unlikely((mode
== DRV_HASH_MD5
) ||
120 (mode
== DRV_HASH_SHA384
) ||
121 (mode
== DRV_HASH_SHA512
))) {
122 set_bytes_swap(desc
, 1);
124 set_cipher_config0(desc
, HASH_DIGEST_RESULT_LITTLE_ENDIAN
);
128 static int ssi_hash_map_result(struct device
*dev
,
129 struct ahash_req_ctx
*state
,
130 unsigned int digestsize
)
132 state
->digest_result_dma_addr
=
133 dma_map_single(dev
, (void *)state
->digest_result_buff
,
136 if (unlikely(dma_mapping_error(dev
, state
->digest_result_dma_addr
))) {
137 dev_err(dev
, "Mapping digest result buffer %u B for DMA failed\n",
141 dev_dbg(dev
, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
142 digestsize
, state
->digest_result_buff
,
143 &state
->digest_result_dma_addr
);
148 static int ssi_hash_map_request(struct device
*dev
,
149 struct ahash_req_ctx
*state
,
150 struct ssi_hash_ctx
*ctx
)
152 bool is_hmac
= ctx
->is_hmac
;
153 ssi_sram_addr_t larval_digest_addr
= ssi_ahash_get_larval_digest_sram_addr(
154 ctx
->drvdata
, ctx
->hash_mode
);
155 struct ssi_crypto_req ssi_req
= {};
156 struct cc_hw_desc desc
;
159 state
->buff0
= kzalloc(SSI_MAX_HASH_BLCK_SIZE
, GFP_KERNEL
| GFP_DMA
);
163 state
->buff1
= kzalloc(SSI_MAX_HASH_BLCK_SIZE
, GFP_KERNEL
| GFP_DMA
);
167 state
->digest_result_buff
= kzalloc(SSI_MAX_HASH_DIGEST_SIZE
, GFP_KERNEL
| GFP_DMA
);
168 if (!state
->digest_result_buff
)
171 state
->digest_buff
= kzalloc(ctx
->inter_digestsize
, GFP_KERNEL
| GFP_DMA
);
172 if (!state
->digest_buff
)
173 goto fail_digest_result_buff
;
175 dev_dbg(dev
, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
177 if (ctx
->hw_mode
!= DRV_CIPHER_XCBC_MAC
) {
178 state
->digest_bytes_len
= kzalloc(HASH_LEN_SIZE
, GFP_KERNEL
| GFP_DMA
);
179 if (!state
->digest_bytes_len
)
182 dev_dbg(dev
, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
183 state
->digest_bytes_len
);
185 state
->digest_bytes_len
= NULL
;
188 state
->opad_digest_buff
= kzalloc(ctx
->inter_digestsize
, GFP_KERNEL
| GFP_DMA
);
189 if (!state
->opad_digest_buff
)
192 dev_dbg(dev
, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
193 state
->opad_digest_buff
);
195 state
->digest_buff_dma_addr
= dma_map_single(dev
, (void *)state
->digest_buff
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
196 if (dma_mapping_error(dev
, state
->digest_buff_dma_addr
)) {
197 dev_err(dev
, "Mapping digest len %d B at va=%pK for DMA failed\n",
198 ctx
->inter_digestsize
, state
->digest_buff
);
201 dev_dbg(dev
, "Mapped digest %d B at va=%pK to dma=%pad\n",
202 ctx
->inter_digestsize
, state
->digest_buff
,
203 &state
->digest_buff_dma_addr
);
206 dma_sync_single_for_cpu(dev
, ctx
->digest_buff_dma_addr
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
207 if ((ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
) || (ctx
->hw_mode
== DRV_CIPHER_CMAC
)) {
208 memset(state
->digest_buff
, 0, ctx
->inter_digestsize
);
210 memcpy(state
->digest_buff
, ctx
->digest_buff
, ctx
->inter_digestsize
);
211 #if (DX_DEV_SHA_MAX > 256)
212 if (unlikely((ctx
->hash_mode
== DRV_HASH_SHA512
) || (ctx
->hash_mode
== DRV_HASH_SHA384
)))
213 memcpy(state
->digest_bytes_len
, digest_len_sha512_init
, HASH_LEN_SIZE
);
215 memcpy(state
->digest_bytes_len
, digest_len_init
, HASH_LEN_SIZE
);
217 memcpy(state
->digest_bytes_len
, digest_len_init
, HASH_LEN_SIZE
);
220 dma_sync_single_for_device(dev
, state
->digest_buff_dma_addr
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
222 if (ctx
->hash_mode
!= DRV_HASH_NULL
) {
223 dma_sync_single_for_cpu(dev
, ctx
->opad_tmp_keys_dma_addr
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
224 memcpy(state
->opad_digest_buff
, ctx
->opad_tmp_keys_buff
, ctx
->inter_digestsize
);
227 /* Copy the initial digests if hash flow. The SRAM contains the
228 * initial digests in the expected order for all SHA*
231 set_din_sram(&desc
, larval_digest_addr
, ctx
->inter_digestsize
);
232 set_dout_dlli(&desc
, state
->digest_buff_dma_addr
,
233 ctx
->inter_digestsize
, NS_BIT
, 0);
234 set_flow_mode(&desc
, BYPASS
);
236 rc
= send_request(ctx
->drvdata
, &ssi_req
, &desc
, 1, 0);
237 if (unlikely(rc
!= 0)) {
238 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
243 if (ctx
->hw_mode
!= DRV_CIPHER_XCBC_MAC
) {
244 state
->digest_bytes_len_dma_addr
= dma_map_single(dev
, (void *)state
->digest_bytes_len
, HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
245 if (dma_mapping_error(dev
, state
->digest_bytes_len_dma_addr
)) {
246 dev_err(dev
, "Mapping digest len %u B at va=%pK for DMA failed\n",
247 HASH_LEN_SIZE
, state
->digest_bytes_len
);
250 dev_dbg(dev
, "Mapped digest len %u B at va=%pK to dma=%pad\n",
251 HASH_LEN_SIZE
, state
->digest_bytes_len
,
252 &state
->digest_bytes_len_dma_addr
);
254 state
->digest_bytes_len_dma_addr
= 0;
257 if (is_hmac
&& ctx
->hash_mode
!= DRV_HASH_NULL
) {
258 state
->opad_digest_dma_addr
= dma_map_single(dev
, (void *)state
->opad_digest_buff
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
259 if (dma_mapping_error(dev
, state
->opad_digest_dma_addr
)) {
260 dev_err(dev
, "Mapping opad digest %d B at va=%pK for DMA failed\n",
261 ctx
->inter_digestsize
,
262 state
->opad_digest_buff
);
265 dev_dbg(dev
, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
266 ctx
->inter_digestsize
, state
->opad_digest_buff
,
267 &state
->opad_digest_dma_addr
);
269 state
->opad_digest_dma_addr
= 0;
271 state
->buff0_cnt
= 0;
272 state
->buff1_cnt
= 0;
273 state
->buff_index
= 0;
274 state
->mlli_params
.curr_pool
= NULL
;
279 if (state
->digest_bytes_len_dma_addr
!= 0) {
280 dma_unmap_single(dev
, state
->digest_bytes_len_dma_addr
, HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
281 state
->digest_bytes_len_dma_addr
= 0;
284 if (state
->digest_buff_dma_addr
!= 0) {
285 dma_unmap_single(dev
, state
->digest_buff_dma_addr
, ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
286 state
->digest_buff_dma_addr
= 0;
289 kfree(state
->opad_digest_buff
);
291 kfree(state
->digest_bytes_len
);
293 kfree(state
->digest_buff
);
294 fail_digest_result_buff
:
295 kfree(state
->digest_result_buff
);
296 state
->digest_result_buff
= NULL
;
307 static void ssi_hash_unmap_request(struct device
*dev
,
308 struct ahash_req_ctx
*state
,
309 struct ssi_hash_ctx
*ctx
)
311 if (state
->digest_buff_dma_addr
!= 0) {
312 dma_unmap_single(dev
, state
->digest_buff_dma_addr
,
313 ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
314 dev_dbg(dev
, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
315 &state
->digest_buff_dma_addr
);
316 state
->digest_buff_dma_addr
= 0;
318 if (state
->digest_bytes_len_dma_addr
!= 0) {
319 dma_unmap_single(dev
, state
->digest_bytes_len_dma_addr
,
320 HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
321 dev_dbg(dev
, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
322 &state
->digest_bytes_len_dma_addr
);
323 state
->digest_bytes_len_dma_addr
= 0;
325 if (state
->opad_digest_dma_addr
!= 0) {
326 dma_unmap_single(dev
, state
->opad_digest_dma_addr
,
327 ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
328 dev_dbg(dev
, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
329 &state
->opad_digest_dma_addr
);
330 state
->opad_digest_dma_addr
= 0;
333 kfree(state
->opad_digest_buff
);
334 kfree(state
->digest_bytes_len
);
335 kfree(state
->digest_buff
);
336 kfree(state
->digest_result_buff
);
341 static void ssi_hash_unmap_result(struct device
*dev
,
342 struct ahash_req_ctx
*state
,
343 unsigned int digestsize
, u8
*result
)
345 if (state
->digest_result_dma_addr
!= 0) {
346 dma_unmap_single(dev
,
347 state
->digest_result_dma_addr
,
350 dev_dbg(dev
, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
351 state
->digest_result_buff
,
352 &state
->digest_result_dma_addr
, digestsize
);
354 state
->digest_result_buff
,
357 state
->digest_result_dma_addr
= 0;
360 static void ssi_hash_update_complete(struct device
*dev
, void *ssi_req
, void __iomem
*cc_base
)
362 struct ahash_request
*req
= (struct ahash_request
*)ssi_req
;
363 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
365 dev_dbg(dev
, "req=%pK\n", req
);
367 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, false);
368 req
->base
.complete(&req
->base
, 0);
371 static void ssi_hash_digest_complete(struct device
*dev
, void *ssi_req
, void __iomem
*cc_base
)
373 struct ahash_request
*req
= (struct ahash_request
*)ssi_req
;
374 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
375 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
376 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
377 u32 digestsize
= crypto_ahash_digestsize(tfm
);
379 dev_dbg(dev
, "req=%pK\n", req
);
381 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, false);
382 ssi_hash_unmap_result(dev
, state
, digestsize
, req
->result
);
383 ssi_hash_unmap_request(dev
, state
, ctx
);
384 req
->base
.complete(&req
->base
, 0);
387 static void ssi_hash_complete(struct device
*dev
, void *ssi_req
, void __iomem
*cc_base
)
389 struct ahash_request
*req
= (struct ahash_request
*)ssi_req
;
390 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
391 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
392 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
393 u32 digestsize
= crypto_ahash_digestsize(tfm
);
395 dev_dbg(dev
, "req=%pK\n", req
);
397 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, false);
398 ssi_hash_unmap_result(dev
, state
, digestsize
, req
->result
);
399 ssi_hash_unmap_request(dev
, state
, ctx
);
400 req
->base
.complete(&req
->base
, 0);
403 static int ssi_hash_digest(struct ahash_req_ctx
*state
,
404 struct ssi_hash_ctx
*ctx
,
405 unsigned int digestsize
,
406 struct scatterlist
*src
,
407 unsigned int nbytes
, u8
*result
,
410 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
411 bool is_hmac
= ctx
->is_hmac
;
412 struct ssi_crypto_req ssi_req
= {};
413 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
414 ssi_sram_addr_t larval_digest_addr
= ssi_ahash_get_larval_digest_sram_addr(
415 ctx
->drvdata
, ctx
->hash_mode
);
419 dev_dbg(dev
, "===== %s-digest (%d) ====\n", is_hmac
? "hmac" : "hash",
422 if (unlikely(ssi_hash_map_request(dev
, state
, ctx
) != 0)) {
423 dev_err(dev
, "map_ahash_source() failed\n");
427 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
428 dev_err(dev
, "map_ahash_digest() failed\n");
432 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, src
, nbytes
, 1) != 0)) {
433 dev_err(dev
, "map_ahash_request_final() failed\n");
438 /* Setup DX request structure */
439 ssi_req
.user_cb
= (void *)ssi_hash_digest_complete
;
440 ssi_req
.user_arg
= (void *)async_req
;
443 /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
444 hw_desc_init(&desc
[idx
]);
445 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
447 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
448 ctx
->inter_digestsize
, NS_BIT
);
450 set_din_sram(&desc
[idx
], larval_digest_addr
,
451 ctx
->inter_digestsize
);
453 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
454 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
457 /* Load the hash current length */
458 hw_desc_init(&desc
[idx
]);
459 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
462 set_din_type(&desc
[idx
], DMA_DLLI
,
463 state
->digest_bytes_len_dma_addr
, HASH_LEN_SIZE
,
466 set_din_const(&desc
[idx
], 0, HASH_LEN_SIZE
);
467 if (likely(nbytes
!= 0))
468 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
470 set_cipher_do(&desc
[idx
], DO_PAD
);
472 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
473 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
476 ssi_hash_create_data_desc(state
, ctx
, DIN_HASH
, desc
, false, &idx
);
479 /* HW last hash block padding (aka. "DO_PAD") */
480 hw_desc_init(&desc
[idx
]);
481 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
482 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
483 HASH_LEN_SIZE
, NS_BIT
, 0);
484 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
485 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE1
);
486 set_cipher_do(&desc
[idx
], DO_PAD
);
489 /* store the hash digest result in the context */
490 hw_desc_init(&desc
[idx
]);
491 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
492 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
493 digestsize
, NS_BIT
, 0);
494 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
495 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
496 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
499 /* Loading hash opad xor key state */
500 hw_desc_init(&desc
[idx
]);
501 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
502 set_din_type(&desc
[idx
], DMA_DLLI
, state
->opad_digest_dma_addr
,
503 ctx
->inter_digestsize
, NS_BIT
);
504 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
505 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
508 /* Load the hash current length */
509 hw_desc_init(&desc
[idx
]);
510 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
511 set_din_sram(&desc
[idx
],
512 ssi_ahash_get_initial_digest_len_sram_addr(
513 ctx
->drvdata
, ctx
->hash_mode
), HASH_LEN_SIZE
);
514 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
515 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
516 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
519 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
520 hw_desc_init(&desc
[idx
]);
521 set_din_no_dma(&desc
[idx
], 0, 0xfffff0);
522 set_dout_no_dma(&desc
[idx
], 0, 0, 1);
525 /* Perform HASH update */
526 hw_desc_init(&desc
[idx
]);
527 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
529 set_flow_mode(&desc
[idx
], DIN_HASH
);
533 /* Get final MAC result */
534 hw_desc_init(&desc
[idx
]);
535 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
537 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
, digestsize
,
538 NS_BIT
, (async_req
? 1 : 0));
540 set_queue_last_ind(&desc
[idx
]);
541 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
542 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
543 set_cipher_config1(&desc
[idx
], HASH_PADDING_DISABLED
);
544 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
548 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
549 if (unlikely(rc
!= -EINPROGRESS
)) {
550 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
551 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
552 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
553 ssi_hash_unmap_request(dev
, state
, ctx
);
556 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
558 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
559 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
561 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, false);
563 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
564 ssi_hash_unmap_request(dev
, state
, ctx
);
569 static int ssi_hash_update(struct ahash_req_ctx
*state
,
570 struct ssi_hash_ctx
*ctx
,
571 unsigned int block_size
,
572 struct scatterlist
*src
,
576 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
577 struct ssi_crypto_req ssi_req
= {};
578 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
582 dev_dbg(dev
, "===== %s-update (%d) ====\n", ctx
->is_hmac
?
583 "hmac" : "hash", nbytes
);
586 /* no real updates required */
590 rc
= ssi_buffer_mgr_map_hash_request_update(ctx
->drvdata
, state
, src
, nbytes
, block_size
);
593 dev_dbg(dev
, " data size not require HW update %x\n",
595 /* No hardware updates are required */
598 dev_err(dev
, "map_ahash_request_update() failed\n");
603 /* Setup DX request structure */
604 ssi_req
.user_cb
= (void *)ssi_hash_update_complete
;
605 ssi_req
.user_arg
= async_req
;
608 /* Restore hash digest */
609 hw_desc_init(&desc
[idx
]);
610 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
611 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
612 ctx
->inter_digestsize
, NS_BIT
);
613 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
614 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
616 /* Restore hash current length */
617 hw_desc_init(&desc
[idx
]);
618 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
619 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_bytes_len_dma_addr
,
620 HASH_LEN_SIZE
, NS_BIT
);
621 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
622 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
625 ssi_hash_create_data_desc(state
, ctx
, DIN_HASH
, desc
, false, &idx
);
627 /* store the hash digest result in context */
628 hw_desc_init(&desc
[idx
]);
629 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
630 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
631 ctx
->inter_digestsize
, NS_BIT
, 0);
632 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
633 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
636 /* store current hash length in context */
637 hw_desc_init(&desc
[idx
]);
638 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
639 set_dout_dlli(&desc
[idx
], state
->digest_bytes_len_dma_addr
,
640 HASH_LEN_SIZE
, NS_BIT
, (async_req
? 1 : 0));
642 set_queue_last_ind(&desc
[idx
]);
643 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
644 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE1
);
648 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
649 if (unlikely(rc
!= -EINPROGRESS
)) {
650 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
651 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
654 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
656 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
657 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
659 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, false);
665 static int ssi_hash_finup(struct ahash_req_ctx
*state
,
666 struct ssi_hash_ctx
*ctx
,
667 unsigned int digestsize
,
668 struct scatterlist
*src
,
673 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
674 bool is_hmac
= ctx
->is_hmac
;
675 struct ssi_crypto_req ssi_req
= {};
676 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
680 dev_dbg(dev
, "===== %s-finup (%d) ====\n", is_hmac
? "hmac" : "hash",
683 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, src
, nbytes
, 1) != 0)) {
684 dev_err(dev
, "map_ahash_request_final() failed\n");
687 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
688 dev_err(dev
, "map_ahash_digest() failed\n");
693 /* Setup DX request structure */
694 ssi_req
.user_cb
= (void *)ssi_hash_complete
;
695 ssi_req
.user_arg
= async_req
;
698 /* Restore hash digest */
699 hw_desc_init(&desc
[idx
]);
700 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
701 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
702 ctx
->inter_digestsize
, NS_BIT
);
703 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
704 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
707 /* Restore hash current length */
708 hw_desc_init(&desc
[idx
]);
709 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
710 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
711 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_bytes_len_dma_addr
,
712 HASH_LEN_SIZE
, NS_BIT
);
713 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
714 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
717 ssi_hash_create_data_desc(state
, ctx
, DIN_HASH
, desc
, false, &idx
);
720 /* Store the hash digest result in the context */
721 hw_desc_init(&desc
[idx
]);
722 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
723 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
724 digestsize
, NS_BIT
, 0);
725 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
726 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
727 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
730 /* Loading hash OPAD xor key state */
731 hw_desc_init(&desc
[idx
]);
732 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
733 set_din_type(&desc
[idx
], DMA_DLLI
, state
->opad_digest_dma_addr
,
734 ctx
->inter_digestsize
, NS_BIT
);
735 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
736 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
739 /* Load the hash current length */
740 hw_desc_init(&desc
[idx
]);
741 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
742 set_din_sram(&desc
[idx
],
743 ssi_ahash_get_initial_digest_len_sram_addr(
744 ctx
->drvdata
, ctx
->hash_mode
), HASH_LEN_SIZE
);
745 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
746 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
747 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
750 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
751 hw_desc_init(&desc
[idx
]);
752 set_din_no_dma(&desc
[idx
], 0, 0xfffff0);
753 set_dout_no_dma(&desc
[idx
], 0, 0, 1);
756 /* Perform HASH update on last digest */
757 hw_desc_init(&desc
[idx
]);
758 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
760 set_flow_mode(&desc
[idx
], DIN_HASH
);
764 /* Get final MAC result */
765 hw_desc_init(&desc
[idx
]);
767 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
, digestsize
,
768 NS_BIT
, (async_req
? 1 : 0));
770 set_queue_last_ind(&desc
[idx
]);
771 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
772 set_cipher_config1(&desc
[idx
], HASH_PADDING_DISABLED
);
773 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
774 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
775 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
779 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
780 if (unlikely(rc
!= -EINPROGRESS
)) {
781 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
782 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
783 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
786 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
788 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
789 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
790 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
792 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, false);
793 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
794 ssi_hash_unmap_request(dev
, state
, ctx
);
800 static int ssi_hash_final(struct ahash_req_ctx
*state
,
801 struct ssi_hash_ctx
*ctx
,
802 unsigned int digestsize
,
803 struct scatterlist
*src
,
808 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
809 bool is_hmac
= ctx
->is_hmac
;
810 struct ssi_crypto_req ssi_req
= {};
811 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
815 dev_dbg(dev
, "===== %s-final (%d) ====\n", is_hmac
? "hmac" : "hash",
818 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, src
, nbytes
, 0) != 0)) {
819 dev_err(dev
, "map_ahash_request_final() failed\n");
823 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
824 dev_err(dev
, "map_ahash_digest() failed\n");
829 /* Setup DX request structure */
830 ssi_req
.user_cb
= (void *)ssi_hash_complete
;
831 ssi_req
.user_arg
= async_req
;
834 /* Restore hash digest */
835 hw_desc_init(&desc
[idx
]);
836 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
837 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
838 ctx
->inter_digestsize
, NS_BIT
);
839 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
840 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
843 /* Restore hash current length */
844 hw_desc_init(&desc
[idx
]);
845 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
846 set_cipher_config1(&desc
[idx
], HASH_PADDING_DISABLED
);
847 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_bytes_len_dma_addr
,
848 HASH_LEN_SIZE
, NS_BIT
);
849 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
850 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
853 ssi_hash_create_data_desc(state
, ctx
, DIN_HASH
, desc
, false, &idx
);
855 /* "DO-PAD" must be enabled only when writing current length to HW */
856 hw_desc_init(&desc
[idx
]);
857 set_cipher_do(&desc
[idx
], DO_PAD
);
858 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
859 set_dout_dlli(&desc
[idx
], state
->digest_bytes_len_dma_addr
,
860 HASH_LEN_SIZE
, NS_BIT
, 0);
861 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE1
);
862 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
866 /* Store the hash digest result in the context */
867 hw_desc_init(&desc
[idx
]);
868 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
869 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
870 digestsize
, NS_BIT
, 0);
871 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
872 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
873 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
876 /* Loading hash OPAD xor key state */
877 hw_desc_init(&desc
[idx
]);
878 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
879 set_din_type(&desc
[idx
], DMA_DLLI
, state
->opad_digest_dma_addr
,
880 ctx
->inter_digestsize
, NS_BIT
);
881 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
882 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
885 /* Load the hash current length */
886 hw_desc_init(&desc
[idx
]);
887 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
888 set_din_sram(&desc
[idx
],
889 ssi_ahash_get_initial_digest_len_sram_addr(
890 ctx
->drvdata
, ctx
->hash_mode
), HASH_LEN_SIZE
);
891 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
892 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
893 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
896 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
897 hw_desc_init(&desc
[idx
]);
898 set_din_no_dma(&desc
[idx
], 0, 0xfffff0);
899 set_dout_no_dma(&desc
[idx
], 0, 0, 1);
902 /* Perform HASH update on last digest */
903 hw_desc_init(&desc
[idx
]);
904 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
906 set_flow_mode(&desc
[idx
], DIN_HASH
);
910 /* Get final MAC result */
911 hw_desc_init(&desc
[idx
]);
912 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
, digestsize
,
913 NS_BIT
, (async_req
? 1 : 0));
915 set_queue_last_ind(&desc
[idx
]);
916 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
917 set_cipher_config1(&desc
[idx
], HASH_PADDING_DISABLED
);
918 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
919 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
920 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
924 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
925 if (unlikely(rc
!= -EINPROGRESS
)) {
926 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
927 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
928 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
931 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
933 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
934 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, true);
935 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
937 ssi_buffer_mgr_unmap_hash_request(dev
, state
, src
, false);
938 ssi_hash_unmap_result(dev
, state
, digestsize
, result
);
939 ssi_hash_unmap_request(dev
, state
, ctx
);
945 static int ssi_hash_init(struct ahash_req_ctx
*state
, struct ssi_hash_ctx
*ctx
)
947 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
949 state
->xcbc_count
= 0;
951 ssi_hash_map_request(dev
, state
, ctx
);
956 static int ssi_hash_setkey(void *hash
,
961 unsigned int hmac_pad_const
[2] = { HMAC_IPAD_CONST
, HMAC_OPAD_CONST
};
962 struct ssi_crypto_req ssi_req
= {};
963 struct ssi_hash_ctx
*ctx
= NULL
;
966 int i
, idx
= 0, rc
= 0;
967 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
968 ssi_sram_addr_t larval_addr
;
971 ctx
= crypto_ahash_ctx(((struct crypto_ahash
*)hash
));
972 dev
= drvdata_to_dev(ctx
->drvdata
);
973 dev_dbg(dev
, "start keylen: %d", keylen
);
975 blocksize
= crypto_tfm_alg_blocksize(&((struct crypto_ahash
*)hash
)->base
);
976 digestsize
= crypto_ahash_digestsize(((struct crypto_ahash
*)hash
));
978 larval_addr
= ssi_ahash_get_larval_digest_sram_addr(
979 ctx
->drvdata
, ctx
->hash_mode
);
981 /* The keylen value distinguishes HASH in case keylen is ZERO bytes,
982 * any NON-ZERO value utilizes HMAC flow
984 ctx
->key_params
.keylen
= keylen
;
985 ctx
->key_params
.key_dma_addr
= 0;
989 ctx
->key_params
.key_dma_addr
= dma_map_single(
991 keylen
, DMA_TO_DEVICE
);
992 if (unlikely(dma_mapping_error(dev
,
993 ctx
->key_params
.key_dma_addr
))) {
994 dev_err(dev
, "Mapping key va=0x%p len=%u for DMA failed\n",
998 dev_dbg(dev
, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
999 &ctx
->key_params
.key_dma_addr
, ctx
->key_params
.keylen
);
1001 if (keylen
> blocksize
) {
1002 /* Load hash initial state */
1003 hw_desc_init(&desc
[idx
]);
1004 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1005 set_din_sram(&desc
[idx
], larval_addr
,
1006 ctx
->inter_digestsize
);
1007 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
1008 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
1011 /* Load the hash current length*/
1012 hw_desc_init(&desc
[idx
]);
1013 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1014 set_din_const(&desc
[idx
], 0, HASH_LEN_SIZE
);
1015 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
1016 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
1017 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
1020 hw_desc_init(&desc
[idx
]);
1021 set_din_type(&desc
[idx
], DMA_DLLI
,
1022 ctx
->key_params
.key_dma_addr
, keylen
,
1024 set_flow_mode(&desc
[idx
], DIN_HASH
);
1027 /* Get hashed key */
1028 hw_desc_init(&desc
[idx
]);
1029 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1030 set_dout_dlli(&desc
[idx
], ctx
->opad_tmp_keys_dma_addr
,
1031 digestsize
, NS_BIT
, 0);
1032 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
1033 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1034 set_cipher_config1(&desc
[idx
], HASH_PADDING_DISABLED
);
1035 ssi_set_hash_endianity(ctx
->hash_mode
, &desc
[idx
]);
1038 hw_desc_init(&desc
[idx
]);
1039 set_din_const(&desc
[idx
], 0, (blocksize
- digestsize
));
1040 set_flow_mode(&desc
[idx
], BYPASS
);
1041 set_dout_dlli(&desc
[idx
], (ctx
->opad_tmp_keys_dma_addr
+
1043 (blocksize
- digestsize
), NS_BIT
, 0);
1046 hw_desc_init(&desc
[idx
]);
1047 set_din_type(&desc
[idx
], DMA_DLLI
,
1048 ctx
->key_params
.key_dma_addr
, keylen
,
1050 set_flow_mode(&desc
[idx
], BYPASS
);
1051 set_dout_dlli(&desc
[idx
], ctx
->opad_tmp_keys_dma_addr
,
1055 if ((blocksize
- keylen
) != 0) {
1056 hw_desc_init(&desc
[idx
]);
1057 set_din_const(&desc
[idx
], 0,
1058 (blocksize
- keylen
));
1059 set_flow_mode(&desc
[idx
], BYPASS
);
1060 set_dout_dlli(&desc
[idx
],
1061 (ctx
->opad_tmp_keys_dma_addr
+
1062 keylen
), (blocksize
- keylen
),
1068 hw_desc_init(&desc
[idx
]);
1069 set_din_const(&desc
[idx
], 0, blocksize
);
1070 set_flow_mode(&desc
[idx
], BYPASS
);
1071 set_dout_dlli(&desc
[idx
], (ctx
->opad_tmp_keys_dma_addr
),
1072 blocksize
, NS_BIT
, 0);
1076 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
1077 if (unlikely(rc
!= 0)) {
1078 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
1082 /* calc derived HMAC key */
1083 for (idx
= 0, i
= 0; i
< 2; i
++) {
1084 /* Load hash initial state */
1085 hw_desc_init(&desc
[idx
]);
1086 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1087 set_din_sram(&desc
[idx
], larval_addr
, ctx
->inter_digestsize
);
1088 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
1089 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
1092 /* Load the hash current length*/
1093 hw_desc_init(&desc
[idx
]);
1094 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1095 set_din_const(&desc
[idx
], 0, HASH_LEN_SIZE
);
1096 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
1097 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
1100 /* Prepare ipad key */
1101 hw_desc_init(&desc
[idx
]);
1102 set_xor_val(&desc
[idx
], hmac_pad_const
[i
]);
1103 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1104 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
1105 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE1
);
1108 /* Perform HASH update */
1109 hw_desc_init(&desc
[idx
]);
1110 set_din_type(&desc
[idx
], DMA_DLLI
, ctx
->opad_tmp_keys_dma_addr
,
1112 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1113 set_xor_active(&desc
[idx
]);
1114 set_flow_mode(&desc
[idx
], DIN_HASH
);
1117 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
1118 hw_desc_init(&desc
[idx
]);
1119 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1120 if (i
> 0) /* Not first iteration */
1121 set_dout_dlli(&desc
[idx
], ctx
->opad_tmp_keys_dma_addr
,
1122 ctx
->inter_digestsize
, NS_BIT
, 0);
1123 else /* First iteration */
1124 set_dout_dlli(&desc
[idx
], ctx
->digest_buff_dma_addr
,
1125 ctx
->inter_digestsize
, NS_BIT
, 0);
1126 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
1127 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1131 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
1135 crypto_ahash_set_flags((struct crypto_ahash
*)hash
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1137 if (ctx
->key_params
.key_dma_addr
) {
1138 dma_unmap_single(dev
, ctx
->key_params
.key_dma_addr
,
1139 ctx
->key_params
.keylen
, DMA_TO_DEVICE
);
1140 dev_dbg(dev
, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1141 &ctx
->key_params
.key_dma_addr
, ctx
->key_params
.keylen
);
1146 static int ssi_xcbc_setkey(struct crypto_ahash
*ahash
,
1147 const u8
*key
, unsigned int keylen
)
1149 struct ssi_crypto_req ssi_req
= {};
1150 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1151 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1152 int idx
= 0, rc
= 0;
1153 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
1155 dev_dbg(dev
, "===== setkey (%d) ====\n", keylen
);
1158 case AES_KEYSIZE_128
:
1159 case AES_KEYSIZE_192
:
1160 case AES_KEYSIZE_256
:
1166 ctx
->key_params
.keylen
= keylen
;
1168 ctx
->key_params
.key_dma_addr
= dma_map_single(
1170 keylen
, DMA_TO_DEVICE
);
1171 if (unlikely(dma_mapping_error(dev
, ctx
->key_params
.key_dma_addr
))) {
1172 dev_err(dev
, "Mapping key va=0x%p len=%u for DMA failed\n",
1176 dev_dbg(dev
, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
1177 &ctx
->key_params
.key_dma_addr
, ctx
->key_params
.keylen
);
1179 ctx
->is_hmac
= true;
1180 /* 1. Load the AES key */
1181 hw_desc_init(&desc
[idx
]);
1182 set_din_type(&desc
[idx
], DMA_DLLI
, ctx
->key_params
.key_dma_addr
,
1184 set_cipher_mode(&desc
[idx
], DRV_CIPHER_ECB
);
1185 set_cipher_config0(&desc
[idx
], DRV_CRYPTO_DIRECTION_ENCRYPT
);
1186 set_key_size_aes(&desc
[idx
], keylen
);
1187 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
1188 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
1191 hw_desc_init(&desc
[idx
]);
1192 set_din_const(&desc
[idx
], 0x01010101, CC_AES_128_BIT_KEY_SIZE
);
1193 set_flow_mode(&desc
[idx
], DIN_AES_DOUT
);
1194 set_dout_dlli(&desc
[idx
], (ctx
->opad_tmp_keys_dma_addr
+
1195 XCBC_MAC_K1_OFFSET
),
1196 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
, 0);
1199 hw_desc_init(&desc
[idx
]);
1200 set_din_const(&desc
[idx
], 0x02020202, CC_AES_128_BIT_KEY_SIZE
);
1201 set_flow_mode(&desc
[idx
], DIN_AES_DOUT
);
1202 set_dout_dlli(&desc
[idx
], (ctx
->opad_tmp_keys_dma_addr
+
1203 XCBC_MAC_K2_OFFSET
),
1204 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
, 0);
1207 hw_desc_init(&desc
[idx
]);
1208 set_din_const(&desc
[idx
], 0x03030303, CC_AES_128_BIT_KEY_SIZE
);
1209 set_flow_mode(&desc
[idx
], DIN_AES_DOUT
);
1210 set_dout_dlli(&desc
[idx
], (ctx
->opad_tmp_keys_dma_addr
+
1211 XCBC_MAC_K3_OFFSET
),
1212 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
, 0);
1215 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 0);
1218 crypto_ahash_set_flags(ahash
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1220 dma_unmap_single(dev
, ctx
->key_params
.key_dma_addr
,
1221 ctx
->key_params
.keylen
, DMA_TO_DEVICE
);
1222 dev_dbg(dev
, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1223 &ctx
->key_params
.key_dma_addr
, ctx
->key_params
.keylen
);
1229 static int ssi_cmac_setkey(struct crypto_ahash
*ahash
,
1230 const u8
*key
, unsigned int keylen
)
1232 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1233 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1235 dev_dbg(dev
, "===== setkey (%d) ====\n", keylen
);
1237 ctx
->is_hmac
= true;
1240 case AES_KEYSIZE_128
:
1241 case AES_KEYSIZE_192
:
1242 case AES_KEYSIZE_256
:
1248 ctx
->key_params
.keylen
= keylen
;
1250 /* STAT_PHASE_1: Copy key to ctx */
1252 dma_sync_single_for_cpu(dev
, ctx
->opad_tmp_keys_dma_addr
,
1253 keylen
, DMA_TO_DEVICE
);
1255 memcpy(ctx
->opad_tmp_keys_buff
, key
, keylen
);
1257 memset(ctx
->opad_tmp_keys_buff
+ 24, 0, CC_AES_KEY_SIZE_MAX
- 24);
1259 dma_sync_single_for_device(dev
, ctx
->opad_tmp_keys_dma_addr
,
1260 keylen
, DMA_TO_DEVICE
);
1262 ctx
->key_params
.keylen
= keylen
;
1268 static void ssi_hash_free_ctx(struct ssi_hash_ctx
*ctx
)
1270 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1272 if (ctx
->digest_buff_dma_addr
!= 0) {
1273 dma_unmap_single(dev
, ctx
->digest_buff_dma_addr
,
1274 sizeof(ctx
->digest_buff
), DMA_BIDIRECTIONAL
);
1275 dev_dbg(dev
, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1276 &ctx
->digest_buff_dma_addr
);
1277 ctx
->digest_buff_dma_addr
= 0;
1279 if (ctx
->opad_tmp_keys_dma_addr
!= 0) {
1280 dma_unmap_single(dev
, ctx
->opad_tmp_keys_dma_addr
,
1281 sizeof(ctx
->opad_tmp_keys_buff
),
1283 dev_dbg(dev
, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1284 &ctx
->opad_tmp_keys_dma_addr
);
1285 ctx
->opad_tmp_keys_dma_addr
= 0;
1288 ctx
->key_params
.keylen
= 0;
1291 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx
*ctx
)
1293 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1295 ctx
->key_params
.keylen
= 0;
1297 ctx
->digest_buff_dma_addr
= dma_map_single(dev
, (void *)ctx
->digest_buff
, sizeof(ctx
->digest_buff
), DMA_BIDIRECTIONAL
);
1298 if (dma_mapping_error(dev
, ctx
->digest_buff_dma_addr
)) {
1299 dev_err(dev
, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1300 sizeof(ctx
->digest_buff
), ctx
->digest_buff
);
1303 dev_dbg(dev
, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1304 sizeof(ctx
->digest_buff
), ctx
->digest_buff
,
1305 &ctx
->digest_buff_dma_addr
);
1307 ctx
->opad_tmp_keys_dma_addr
= dma_map_single(dev
, (void *)ctx
->opad_tmp_keys_buff
, sizeof(ctx
->opad_tmp_keys_buff
), DMA_BIDIRECTIONAL
);
1308 if (dma_mapping_error(dev
, ctx
->opad_tmp_keys_dma_addr
)) {
1309 dev_err(dev
, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1310 sizeof(ctx
->opad_tmp_keys_buff
),
1311 ctx
->opad_tmp_keys_buff
);
1314 dev_dbg(dev
, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1315 sizeof(ctx
->opad_tmp_keys_buff
), ctx
->opad_tmp_keys_buff
,
1316 &ctx
->opad_tmp_keys_dma_addr
);
1318 ctx
->is_hmac
= false;
1322 ssi_hash_free_ctx(ctx
);
1326 static int ssi_ahash_cra_init(struct crypto_tfm
*tfm
)
1328 struct ssi_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1329 struct hash_alg_common
*hash_alg_common
=
1330 container_of(tfm
->__crt_alg
, struct hash_alg_common
, base
);
1331 struct ahash_alg
*ahash_alg
=
1332 container_of(hash_alg_common
, struct ahash_alg
, halg
);
1333 struct ssi_hash_alg
*ssi_alg
=
1334 container_of(ahash_alg
, struct ssi_hash_alg
, ahash_alg
);
1336 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1337 sizeof(struct ahash_req_ctx
));
1339 ctx
->hash_mode
= ssi_alg
->hash_mode
;
1340 ctx
->hw_mode
= ssi_alg
->hw_mode
;
1341 ctx
->inter_digestsize
= ssi_alg
->inter_digestsize
;
1342 ctx
->drvdata
= ssi_alg
->drvdata
;
1344 return ssi_hash_alloc_ctx(ctx
);
1347 static void ssi_hash_cra_exit(struct crypto_tfm
*tfm
)
1349 struct ssi_hash_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1350 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1352 dev_dbg(dev
, "ssi_hash_cra_exit");
1353 ssi_hash_free_ctx(ctx
);
1356 static int ssi_mac_update(struct ahash_request
*req
)
1358 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1359 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1360 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1361 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1362 unsigned int block_size
= crypto_tfm_alg_blocksize(&tfm
->base
);
1363 struct ssi_crypto_req ssi_req
= {};
1364 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
1368 if (req
->nbytes
== 0) {
1369 /* no real updates required */
1373 state
->xcbc_count
++;
1375 rc
= ssi_buffer_mgr_map_hash_request_update(ctx
->drvdata
, state
, req
->src
, req
->nbytes
, block_size
);
1378 dev_dbg(dev
, " data size not require HW update %x\n",
1380 /* No hardware updates are required */
1383 dev_err(dev
, "map_ahash_request_update() failed\n");
1387 if (ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
)
1388 ssi_hash_create_xcbc_setup(req
, desc
, &idx
);
1390 ssi_hash_create_cmac_setup(req
, desc
, &idx
);
1392 ssi_hash_create_data_desc(state
, ctx
, DIN_AES_DOUT
, desc
, true, &idx
);
1394 /* store the hash digest result in context */
1395 hw_desc_init(&desc
[idx
]);
1396 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1397 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
1398 ctx
->inter_digestsize
, NS_BIT
, 1);
1399 set_queue_last_ind(&desc
[idx
]);
1400 set_flow_mode(&desc
[idx
], S_AES_to_DOUT
);
1401 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1404 /* Setup DX request structure */
1405 ssi_req
.user_cb
= (void *)ssi_hash_update_complete
;
1406 ssi_req
.user_arg
= (void *)req
;
1408 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
1409 if (unlikely(rc
!= -EINPROGRESS
)) {
1410 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
1411 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, true);
1416 static int ssi_mac_final(struct ahash_request
*req
)
1418 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1419 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1420 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1421 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1422 struct ssi_crypto_req ssi_req
= {};
1423 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
1426 u32 key_size
, key_len
;
1427 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1429 u32 rem_cnt
= state
->buff_index
? state
->buff1_cnt
:
1432 if (ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
) {
1433 key_size
= CC_AES_128_BIT_KEY_SIZE
;
1434 key_len
= CC_AES_128_BIT_KEY_SIZE
;
1436 key_size
= (ctx
->key_params
.keylen
== 24) ? AES_MAX_KEY_SIZE
:
1437 ctx
->key_params
.keylen
;
1438 key_len
= ctx
->key_params
.keylen
;
1441 dev_dbg(dev
, "===== final xcbc reminder (%d) ====\n", rem_cnt
);
1443 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, req
->src
, req
->nbytes
, 0) != 0)) {
1444 dev_err(dev
, "map_ahash_request_final() failed\n");
1448 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
1449 dev_err(dev
, "map_ahash_digest() failed\n");
1453 /* Setup DX request structure */
1454 ssi_req
.user_cb
= (void *)ssi_hash_complete
;
1455 ssi_req
.user_arg
= (void *)req
;
1457 if (state
->xcbc_count
&& (rem_cnt
== 0)) {
1458 /* Load key for ECB decryption */
1459 hw_desc_init(&desc
[idx
]);
1460 set_cipher_mode(&desc
[idx
], DRV_CIPHER_ECB
);
1461 set_cipher_config0(&desc
[idx
], DRV_CRYPTO_DIRECTION_DECRYPT
);
1462 set_din_type(&desc
[idx
], DMA_DLLI
,
1463 (ctx
->opad_tmp_keys_dma_addr
+
1464 XCBC_MAC_K1_OFFSET
), key_size
, NS_BIT
);
1465 set_key_size_aes(&desc
[idx
], key_len
);
1466 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
1467 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
1470 /* Initiate decryption of block state to previous block_state-XOR-M[n] */
1471 hw_desc_init(&desc
[idx
]);
1472 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
1473 CC_AES_BLOCK_SIZE
, NS_BIT
);
1474 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
1475 CC_AES_BLOCK_SIZE
, NS_BIT
, 0);
1476 set_flow_mode(&desc
[idx
], DIN_AES_DOUT
);
1479 /* Memory Barrier: wait for axi write to complete */
1480 hw_desc_init(&desc
[idx
]);
1481 set_din_no_dma(&desc
[idx
], 0, 0xfffff0);
1482 set_dout_no_dma(&desc
[idx
], 0, 0, 1);
1486 if (ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
)
1487 ssi_hash_create_xcbc_setup(req
, desc
, &idx
);
1489 ssi_hash_create_cmac_setup(req
, desc
, &idx
);
1491 if (state
->xcbc_count
== 0) {
1492 hw_desc_init(&desc
[idx
]);
1493 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1494 set_key_size_aes(&desc
[idx
], key_len
);
1495 set_cmac_size0_mode(&desc
[idx
]);
1496 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
1498 } else if (rem_cnt
> 0) {
1499 ssi_hash_create_data_desc(state
, ctx
, DIN_AES_DOUT
, desc
, false, &idx
);
1501 hw_desc_init(&desc
[idx
]);
1502 set_din_const(&desc
[idx
], 0x00, CC_AES_BLOCK_SIZE
);
1503 set_flow_mode(&desc
[idx
], DIN_AES_DOUT
);
1507 /* Get final MAC result */
1508 hw_desc_init(&desc
[idx
]);
1510 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
,
1511 digestsize
, NS_BIT
, 1);
1512 set_queue_last_ind(&desc
[idx
]);
1513 set_flow_mode(&desc
[idx
], S_AES_to_DOUT
);
1514 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1515 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1518 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
1519 if (unlikely(rc
!= -EINPROGRESS
)) {
1520 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
1521 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, true);
1522 ssi_hash_unmap_result(dev
, state
, digestsize
, req
->result
);
1527 static int ssi_mac_finup(struct ahash_request
*req
)
1529 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1530 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1531 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1532 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1533 struct ssi_crypto_req ssi_req
= {};
1534 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
1538 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1540 dev_dbg(dev
, "===== finup xcbc(%d) ====\n", req
->nbytes
);
1541 if (state
->xcbc_count
> 0 && req
->nbytes
== 0) {
1542 dev_dbg(dev
, "No data to update. Call to fdx_mac_final\n");
1543 return ssi_mac_final(req
);
1546 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, req
->src
, req
->nbytes
, 1) != 0)) {
1547 dev_err(dev
, "map_ahash_request_final() failed\n");
1550 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
1551 dev_err(dev
, "map_ahash_digest() failed\n");
1555 /* Setup DX request structure */
1556 ssi_req
.user_cb
= (void *)ssi_hash_complete
;
1557 ssi_req
.user_arg
= (void *)req
;
1559 if (ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
) {
1560 key_len
= CC_AES_128_BIT_KEY_SIZE
;
1561 ssi_hash_create_xcbc_setup(req
, desc
, &idx
);
1563 key_len
= ctx
->key_params
.keylen
;
1564 ssi_hash_create_cmac_setup(req
, desc
, &idx
);
1567 if (req
->nbytes
== 0) {
1568 hw_desc_init(&desc
[idx
]);
1569 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1570 set_key_size_aes(&desc
[idx
], key_len
);
1571 set_cmac_size0_mode(&desc
[idx
]);
1572 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
1575 ssi_hash_create_data_desc(state
, ctx
, DIN_AES_DOUT
, desc
, false, &idx
);
1578 /* Get final MAC result */
1579 hw_desc_init(&desc
[idx
]);
1581 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
,
1582 digestsize
, NS_BIT
, 1);
1583 set_queue_last_ind(&desc
[idx
]);
1584 set_flow_mode(&desc
[idx
], S_AES_to_DOUT
);
1585 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1586 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1589 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
1590 if (unlikely(rc
!= -EINPROGRESS
)) {
1591 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
1592 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, true);
1593 ssi_hash_unmap_result(dev
, state
, digestsize
, req
->result
);
1598 static int ssi_mac_digest(struct ahash_request
*req
)
1600 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1601 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1602 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1603 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1604 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1605 struct ssi_crypto_req ssi_req
= {};
1606 struct cc_hw_desc desc
[SSI_MAX_AHASH_SEQ_LEN
];
1611 dev_dbg(dev
, "===== -digest mac (%d) ====\n", req
->nbytes
);
1613 if (unlikely(ssi_hash_map_request(dev
, state
, ctx
) != 0)) {
1614 dev_err(dev
, "map_ahash_source() failed\n");
1617 if (unlikely(ssi_hash_map_result(dev
, state
, digestsize
) != 0)) {
1618 dev_err(dev
, "map_ahash_digest() failed\n");
1622 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx
->drvdata
, state
, req
->src
, req
->nbytes
, 1) != 0)) {
1623 dev_err(dev
, "map_ahash_request_final() failed\n");
1627 /* Setup DX request structure */
1628 ssi_req
.user_cb
= (void *)ssi_hash_digest_complete
;
1629 ssi_req
.user_arg
= (void *)req
;
1631 if (ctx
->hw_mode
== DRV_CIPHER_XCBC_MAC
) {
1632 key_len
= CC_AES_128_BIT_KEY_SIZE
;
1633 ssi_hash_create_xcbc_setup(req
, desc
, &idx
);
1635 key_len
= ctx
->key_params
.keylen
;
1636 ssi_hash_create_cmac_setup(req
, desc
, &idx
);
1639 if (req
->nbytes
== 0) {
1640 hw_desc_init(&desc
[idx
]);
1641 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1642 set_key_size_aes(&desc
[idx
], key_len
);
1643 set_cmac_size0_mode(&desc
[idx
]);
1644 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
1647 ssi_hash_create_data_desc(state
, ctx
, DIN_AES_DOUT
, desc
, false, &idx
);
1650 /* Get final MAC result */
1651 hw_desc_init(&desc
[idx
]);
1652 set_dout_dlli(&desc
[idx
], state
->digest_result_dma_addr
,
1653 CC_AES_BLOCK_SIZE
, NS_BIT
, 1);
1654 set_queue_last_ind(&desc
[idx
]);
1655 set_flow_mode(&desc
[idx
], S_AES_to_DOUT
);
1656 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE0
);
1657 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
1658 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
1661 rc
= send_request(ctx
->drvdata
, &ssi_req
, desc
, idx
, 1);
1662 if (unlikely(rc
!= -EINPROGRESS
)) {
1663 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
1664 ssi_buffer_mgr_unmap_hash_request(dev
, state
, req
->src
, true);
1665 ssi_hash_unmap_result(dev
, state
, digestsize
, req
->result
);
1666 ssi_hash_unmap_request(dev
, state
, ctx
);
1671 //ahash wrap functions
1672 static int ssi_ahash_digest(struct ahash_request
*req
)
1674 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1675 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1676 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1677 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1679 return ssi_hash_digest(state
, ctx
, digestsize
, req
->src
, req
->nbytes
, req
->result
, (void *)req
);
1682 static int ssi_ahash_update(struct ahash_request
*req
)
1684 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1685 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1686 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1687 unsigned int block_size
= crypto_tfm_alg_blocksize(&tfm
->base
);
1689 return ssi_hash_update(state
, ctx
, block_size
, req
->src
, req
->nbytes
, (void *)req
);
1692 static int ssi_ahash_finup(struct ahash_request
*req
)
1694 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1695 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1696 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1697 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1699 return ssi_hash_finup(state
, ctx
, digestsize
, req
->src
, req
->nbytes
, req
->result
, (void *)req
);
1702 static int ssi_ahash_final(struct ahash_request
*req
)
1704 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1705 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1706 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1707 u32 digestsize
= crypto_ahash_digestsize(tfm
);
1709 return ssi_hash_final(state
, ctx
, digestsize
, req
->src
, req
->nbytes
, req
->result
, (void *)req
);
1712 static int ssi_ahash_init(struct ahash_request
*req
)
1714 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1715 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1716 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1717 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1719 dev_dbg(dev
, "===== init (%d) ====\n", req
->nbytes
);
1721 return ssi_hash_init(state
, ctx
);
1724 static int ssi_ahash_export(struct ahash_request
*req
, void *out
)
1726 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1727 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1728 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1729 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1730 u8
*curr_buff
= state
->buff_index
? state
->buff1
: state
->buff0
;
1731 u32 curr_buff_cnt
= state
->buff_index
? state
->buff1_cnt
:
1733 const u32 tmp
= CC_EXPORT_MAGIC
;
1735 memcpy(out
, &tmp
, sizeof(u32
));
1738 dma_sync_single_for_cpu(dev
, state
->digest_buff_dma_addr
,
1739 ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
1740 memcpy(out
, state
->digest_buff
, ctx
->inter_digestsize
);
1741 out
+= ctx
->inter_digestsize
;
1743 if (state
->digest_bytes_len_dma_addr
) {
1744 dma_sync_single_for_cpu(dev
, state
->digest_bytes_len_dma_addr
,
1745 HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
1746 memcpy(out
, state
->digest_bytes_len
, HASH_LEN_SIZE
);
1748 /* Poison the unused exported digest len field. */
1749 memset(out
, 0x5F, HASH_LEN_SIZE
);
1751 out
+= HASH_LEN_SIZE
;
1753 memcpy(out
, &curr_buff_cnt
, sizeof(u32
));
1756 memcpy(out
, curr_buff
, curr_buff_cnt
);
1758 /* No sync for device ineeded since we did not change the data,
1765 static int ssi_ahash_import(struct ahash_request
*req
, const void *in
)
1767 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(req
);
1768 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(ahash
);
1769 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
1770 struct ahash_req_ctx
*state
= ahash_request_ctx(req
);
1774 memcpy(&tmp
, in
, sizeof(u32
));
1775 if (tmp
!= CC_EXPORT_MAGIC
) {
1781 rc
= ssi_hash_init(state
, ctx
);
1785 dma_sync_single_for_cpu(dev
, state
->digest_buff_dma_addr
,
1786 ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
1787 memcpy(state
->digest_buff
, in
, ctx
->inter_digestsize
);
1788 in
+= ctx
->inter_digestsize
;
1790 if (state
->digest_bytes_len_dma_addr
) {
1791 dma_sync_single_for_cpu(dev
, state
->digest_bytes_len_dma_addr
,
1792 HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
1793 memcpy(state
->digest_bytes_len
, in
, HASH_LEN_SIZE
);
1795 in
+= HASH_LEN_SIZE
;
1797 dma_sync_single_for_device(dev
, state
->digest_buff_dma_addr
,
1798 ctx
->inter_digestsize
, DMA_BIDIRECTIONAL
);
1800 if (state
->digest_bytes_len_dma_addr
)
1801 dma_sync_single_for_device(dev
,
1802 state
->digest_bytes_len_dma_addr
,
1803 HASH_LEN_SIZE
, DMA_BIDIRECTIONAL
);
1805 state
->buff_index
= 0;
1807 /* Sanity check the data as much as possible */
1808 memcpy(&tmp
, in
, sizeof(u32
));
1809 if (tmp
> SSI_MAX_HASH_BLCK_SIZE
) {
1815 state
->buff0_cnt
= tmp
;
1816 memcpy(state
->buff0
, in
, state
->buff0_cnt
);
1822 static int ssi_ahash_setkey(struct crypto_ahash
*ahash
,
1823 const u8
*key
, unsigned int keylen
)
1825 return ssi_hash_setkey((void *)ahash
, key
, keylen
, false);
1828 struct ssi_hash_template
{
1829 char name
[CRYPTO_MAX_ALG_NAME
];
1830 char driver_name
[CRYPTO_MAX_ALG_NAME
];
1831 char mac_name
[CRYPTO_MAX_ALG_NAME
];
1832 char mac_driver_name
[CRYPTO_MAX_ALG_NAME
];
1833 unsigned int blocksize
;
1835 struct ahash_alg template_ahash
;
1838 int inter_digestsize
;
1839 struct ssi_drvdata
*drvdata
;
1842 #define CC_STATE_SIZE(_x) \
1843 ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1845 /* hash descriptors */
1846 static struct ssi_hash_template driver_hash
[] = {
1847 //Asynchronize hash template
1850 .driver_name
= "sha1-dx",
1851 .mac_name
= "hmac(sha1)",
1852 .mac_driver_name
= "hmac-sha1-dx",
1853 .blocksize
= SHA1_BLOCK_SIZE
,
1854 .synchronize
= false,
1856 .init
= ssi_ahash_init
,
1857 .update
= ssi_ahash_update
,
1858 .final
= ssi_ahash_final
,
1859 .finup
= ssi_ahash_finup
,
1860 .digest
= ssi_ahash_digest
,
1861 .export
= ssi_ahash_export
,
1862 .import
= ssi_ahash_import
,
1863 .setkey
= ssi_ahash_setkey
,
1865 .digestsize
= SHA1_DIGEST_SIZE
,
1866 .statesize
= CC_STATE_SIZE(SHA1_DIGEST_SIZE
),
1869 .hash_mode
= DRV_HASH_SHA1
,
1870 .hw_mode
= DRV_HASH_HW_SHA1
,
1871 .inter_digestsize
= SHA1_DIGEST_SIZE
,
1875 .driver_name
= "sha256-dx",
1876 .mac_name
= "hmac(sha256)",
1877 .mac_driver_name
= "hmac-sha256-dx",
1878 .blocksize
= SHA256_BLOCK_SIZE
,
1880 .init
= ssi_ahash_init
,
1881 .update
= ssi_ahash_update
,
1882 .final
= ssi_ahash_final
,
1883 .finup
= ssi_ahash_finup
,
1884 .digest
= ssi_ahash_digest
,
1885 .export
= ssi_ahash_export
,
1886 .import
= ssi_ahash_import
,
1887 .setkey
= ssi_ahash_setkey
,
1889 .digestsize
= SHA256_DIGEST_SIZE
,
1890 .statesize
= CC_STATE_SIZE(SHA256_DIGEST_SIZE
)
1893 .hash_mode
= DRV_HASH_SHA256
,
1894 .hw_mode
= DRV_HASH_HW_SHA256
,
1895 .inter_digestsize
= SHA256_DIGEST_SIZE
,
1899 .driver_name
= "sha224-dx",
1900 .mac_name
= "hmac(sha224)",
1901 .mac_driver_name
= "hmac-sha224-dx",
1902 .blocksize
= SHA224_BLOCK_SIZE
,
1904 .init
= ssi_ahash_init
,
1905 .update
= ssi_ahash_update
,
1906 .final
= ssi_ahash_final
,
1907 .finup
= ssi_ahash_finup
,
1908 .digest
= ssi_ahash_digest
,
1909 .export
= ssi_ahash_export
,
1910 .import
= ssi_ahash_import
,
1911 .setkey
= ssi_ahash_setkey
,
1913 .digestsize
= SHA224_DIGEST_SIZE
,
1914 .statesize
= CC_STATE_SIZE(SHA224_DIGEST_SIZE
),
1917 .hash_mode
= DRV_HASH_SHA224
,
1918 .hw_mode
= DRV_HASH_HW_SHA256
,
1919 .inter_digestsize
= SHA256_DIGEST_SIZE
,
1921 #if (DX_DEV_SHA_MAX > 256)
1924 .driver_name
= "sha384-dx",
1925 .mac_name
= "hmac(sha384)",
1926 .mac_driver_name
= "hmac-sha384-dx",
1927 .blocksize
= SHA384_BLOCK_SIZE
,
1929 .init
= ssi_ahash_init
,
1930 .update
= ssi_ahash_update
,
1931 .final
= ssi_ahash_final
,
1932 .finup
= ssi_ahash_finup
,
1933 .digest
= ssi_ahash_digest
,
1934 .export
= ssi_ahash_export
,
1935 .import
= ssi_ahash_import
,
1936 .setkey
= ssi_ahash_setkey
,
1938 .digestsize
= SHA384_DIGEST_SIZE
,
1939 .statesize
= CC_STATE_SIZE(SHA384_DIGEST_SIZE
),
1942 .hash_mode
= DRV_HASH_SHA384
,
1943 .hw_mode
= DRV_HASH_HW_SHA512
,
1944 .inter_digestsize
= SHA512_DIGEST_SIZE
,
1948 .driver_name
= "sha512-dx",
1949 .mac_name
= "hmac(sha512)",
1950 .mac_driver_name
= "hmac-sha512-dx",
1951 .blocksize
= SHA512_BLOCK_SIZE
,
1953 .init
= ssi_ahash_init
,
1954 .update
= ssi_ahash_update
,
1955 .final
= ssi_ahash_final
,
1956 .finup
= ssi_ahash_finup
,
1957 .digest
= ssi_ahash_digest
,
1958 .export
= ssi_ahash_export
,
1959 .import
= ssi_ahash_import
,
1960 .setkey
= ssi_ahash_setkey
,
1962 .digestsize
= SHA512_DIGEST_SIZE
,
1963 .statesize
= CC_STATE_SIZE(SHA512_DIGEST_SIZE
),
1966 .hash_mode
= DRV_HASH_SHA512
,
1967 .hw_mode
= DRV_HASH_HW_SHA512
,
1968 .inter_digestsize
= SHA512_DIGEST_SIZE
,
1973 .driver_name
= "md5-dx",
1974 .mac_name
= "hmac(md5)",
1975 .mac_driver_name
= "hmac-md5-dx",
1976 .blocksize
= MD5_HMAC_BLOCK_SIZE
,
1978 .init
= ssi_ahash_init
,
1979 .update
= ssi_ahash_update
,
1980 .final
= ssi_ahash_final
,
1981 .finup
= ssi_ahash_finup
,
1982 .digest
= ssi_ahash_digest
,
1983 .export
= ssi_ahash_export
,
1984 .import
= ssi_ahash_import
,
1985 .setkey
= ssi_ahash_setkey
,
1987 .digestsize
= MD5_DIGEST_SIZE
,
1988 .statesize
= CC_STATE_SIZE(MD5_DIGEST_SIZE
),
1991 .hash_mode
= DRV_HASH_MD5
,
1992 .hw_mode
= DRV_HASH_HW_MD5
,
1993 .inter_digestsize
= MD5_DIGEST_SIZE
,
1996 .mac_name
= "xcbc(aes)",
1997 .mac_driver_name
= "xcbc-aes-dx",
1998 .blocksize
= AES_BLOCK_SIZE
,
2000 .init
= ssi_ahash_init
,
2001 .update
= ssi_mac_update
,
2002 .final
= ssi_mac_final
,
2003 .finup
= ssi_mac_finup
,
2004 .digest
= ssi_mac_digest
,
2005 .setkey
= ssi_xcbc_setkey
,
2006 .export
= ssi_ahash_export
,
2007 .import
= ssi_ahash_import
,
2009 .digestsize
= AES_BLOCK_SIZE
,
2010 .statesize
= CC_STATE_SIZE(AES_BLOCK_SIZE
),
2013 .hash_mode
= DRV_HASH_NULL
,
2014 .hw_mode
= DRV_CIPHER_XCBC_MAC
,
2015 .inter_digestsize
= AES_BLOCK_SIZE
,
2019 .mac_name
= "cmac(aes)",
2020 .mac_driver_name
= "cmac-aes-dx",
2021 .blocksize
= AES_BLOCK_SIZE
,
2023 .init
= ssi_ahash_init
,
2024 .update
= ssi_mac_update
,
2025 .final
= ssi_mac_final
,
2026 .finup
= ssi_mac_finup
,
2027 .digest
= ssi_mac_digest
,
2028 .setkey
= ssi_cmac_setkey
,
2029 .export
= ssi_ahash_export
,
2030 .import
= ssi_ahash_import
,
2032 .digestsize
= AES_BLOCK_SIZE
,
2033 .statesize
= CC_STATE_SIZE(AES_BLOCK_SIZE
),
2036 .hash_mode
= DRV_HASH_NULL
,
2037 .hw_mode
= DRV_CIPHER_CMAC
,
2038 .inter_digestsize
= AES_BLOCK_SIZE
,
2044 static struct ssi_hash_alg
*
2045 ssi_hash_create_alg(struct ssi_hash_template
*template, struct device
*dev
,
2048 struct ssi_hash_alg
*t_crypto_alg
;
2049 struct crypto_alg
*alg
;
2050 struct ahash_alg
*halg
;
2052 t_crypto_alg
= kzalloc(sizeof(*t_crypto_alg
), GFP_KERNEL
);
2054 return ERR_PTR(-ENOMEM
);
2057 t_crypto_alg
->ahash_alg
= template->template_ahash
;
2058 halg
= &t_crypto_alg
->ahash_alg
;
2059 alg
= &halg
->halg
.base
;
2062 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
2063 template->mac_name
);
2064 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
2065 template->mac_driver_name
);
2067 halg
->setkey
= NULL
;
2068 snprintf(alg
->cra_name
, CRYPTO_MAX_ALG_NAME
, "%s",
2070 snprintf(alg
->cra_driver_name
, CRYPTO_MAX_ALG_NAME
, "%s",
2071 template->driver_name
);
2073 alg
->cra_module
= THIS_MODULE
;
2074 alg
->cra_ctxsize
= sizeof(struct ssi_hash_ctx
);
2075 alg
->cra_priority
= SSI_CRA_PRIO
;
2076 alg
->cra_blocksize
= template->blocksize
;
2077 alg
->cra_alignmask
= 0;
2078 alg
->cra_exit
= ssi_hash_cra_exit
;
2080 alg
->cra_init
= ssi_ahash_cra_init
;
2081 alg
->cra_flags
= CRYPTO_ALG_ASYNC
| CRYPTO_ALG_TYPE_AHASH
|
2082 CRYPTO_ALG_KERN_DRIVER_ONLY
;
2083 alg
->cra_type
= &crypto_ahash_type
;
2085 t_crypto_alg
->hash_mode
= template->hash_mode
;
2086 t_crypto_alg
->hw_mode
= template->hw_mode
;
2087 t_crypto_alg
->inter_digestsize
= template->inter_digestsize
;
2089 return t_crypto_alg
;
2092 int ssi_hash_init_sram_digest_consts(struct ssi_drvdata
*drvdata
)
2094 struct ssi_hash_handle
*hash_handle
= drvdata
->hash_handle
;
2095 ssi_sram_addr_t sram_buff_ofs
= hash_handle
->digest_len_sram_addr
;
2096 unsigned int larval_seq_len
= 0;
2097 struct cc_hw_desc larval_seq
[CC_DIGEST_SIZE_MAX
/ sizeof(u32
)];
2098 struct device
*dev
= drvdata_to_dev(drvdata
);
2100 #if (DX_DEV_SHA_MAX > 256)
2104 /* Copy-to-sram digest-len */
2105 ssi_sram_mgr_const2sram_desc(digest_len_init
, sram_buff_ofs
,
2106 ARRAY_SIZE(digest_len_init
),
2107 larval_seq
, &larval_seq_len
);
2108 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2109 if (unlikely(rc
!= 0))
2110 goto init_digest_const_err
;
2112 sram_buff_ofs
+= sizeof(digest_len_init
);
2115 #if (DX_DEV_SHA_MAX > 256)
2116 /* Copy-to-sram digest-len for sha384/512 */
2117 ssi_sram_mgr_const2sram_desc(digest_len_sha512_init
, sram_buff_ofs
,
2118 ARRAY_SIZE(digest_len_sha512_init
),
2119 larval_seq
, &larval_seq_len
);
2120 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2121 if (unlikely(rc
!= 0))
2122 goto init_digest_const_err
;
2124 sram_buff_ofs
+= sizeof(digest_len_sha512_init
);
2128 /* The initial digests offset */
2129 hash_handle
->larval_digest_sram_addr
= sram_buff_ofs
;
2131 /* Copy-to-sram initial SHA* digests */
2132 ssi_sram_mgr_const2sram_desc(md5_init
, sram_buff_ofs
,
2133 ARRAY_SIZE(md5_init
), larval_seq
,
2135 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2136 if (unlikely(rc
!= 0))
2137 goto init_digest_const_err
;
2138 sram_buff_ofs
+= sizeof(md5_init
);
2141 ssi_sram_mgr_const2sram_desc(sha1_init
, sram_buff_ofs
,
2142 ARRAY_SIZE(sha1_init
), larval_seq
,
2144 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2145 if (unlikely(rc
!= 0))
2146 goto init_digest_const_err
;
2147 sram_buff_ofs
+= sizeof(sha1_init
);
2150 ssi_sram_mgr_const2sram_desc(sha224_init
, sram_buff_ofs
,
2151 ARRAY_SIZE(sha224_init
), larval_seq
,
2153 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2154 if (unlikely(rc
!= 0))
2155 goto init_digest_const_err
;
2156 sram_buff_ofs
+= sizeof(sha224_init
);
2159 ssi_sram_mgr_const2sram_desc(sha256_init
, sram_buff_ofs
,
2160 ARRAY_SIZE(sha256_init
), larval_seq
,
2162 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2163 if (unlikely(rc
!= 0))
2164 goto init_digest_const_err
;
2165 sram_buff_ofs
+= sizeof(sha256_init
);
2168 #if (DX_DEV_SHA_MAX > 256)
2169 /* We are forced to swap each double-word larval before copying to sram */
2170 for (i
= 0; i
< ARRAY_SIZE(sha384_init
); i
++) {
2171 const u32 const0
= ((u32
*)((u64
*)&sha384_init
[i
]))[1];
2172 const u32 const1
= ((u32
*)((u64
*)&sha384_init
[i
]))[0];
2174 ssi_sram_mgr_const2sram_desc(&const0
, sram_buff_ofs
, 1,
2175 larval_seq
, &larval_seq_len
);
2176 sram_buff_ofs
+= sizeof(u32
);
2177 ssi_sram_mgr_const2sram_desc(&const1
, sram_buff_ofs
, 1,
2178 larval_seq
, &larval_seq_len
);
2179 sram_buff_ofs
+= sizeof(u32
);
2181 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2182 if (unlikely(rc
!= 0)) {
2183 dev_err(dev
, "send_request() failed (rc = %d)\n", rc
);
2184 goto init_digest_const_err
;
2188 for (i
= 0; i
< ARRAY_SIZE(sha512_init
); i
++) {
2189 const u32 const0
= ((u32
*)((u64
*)&sha512_init
[i
]))[1];
2190 const u32 const1
= ((u32
*)((u64
*)&sha512_init
[i
]))[0];
2192 ssi_sram_mgr_const2sram_desc(&const0
, sram_buff_ofs
, 1,
2193 larval_seq
, &larval_seq_len
);
2194 sram_buff_ofs
+= sizeof(u32
);
2195 ssi_sram_mgr_const2sram_desc(&const1
, sram_buff_ofs
, 1,
2196 larval_seq
, &larval_seq_len
);
2197 sram_buff_ofs
+= sizeof(u32
);
2199 rc
= send_request_init(drvdata
, larval_seq
, larval_seq_len
);
2200 if (unlikely(rc
!= 0)) {
2201 dev_err(dev
, "send_request() failed (rc = %d)\n", rc
);
2202 goto init_digest_const_err
;
2206 init_digest_const_err
:
2210 int ssi_hash_alloc(struct ssi_drvdata
*drvdata
)
2212 struct ssi_hash_handle
*hash_handle
;
2213 ssi_sram_addr_t sram_buff
;
2214 u32 sram_size_to_alloc
;
2215 struct device
*dev
= drvdata_to_dev(drvdata
);
2219 hash_handle
= kzalloc(sizeof(*hash_handle
), GFP_KERNEL
);
2223 INIT_LIST_HEAD(&hash_handle
->hash_list
);
2224 drvdata
->hash_handle
= hash_handle
;
2226 sram_size_to_alloc
= sizeof(digest_len_init
) +
2227 #if (DX_DEV_SHA_MAX > 256)
2228 sizeof(digest_len_sha512_init
) +
2229 sizeof(sha384_init
) +
2230 sizeof(sha512_init
) +
2234 sizeof(sha224_init
) +
2235 sizeof(sha256_init
);
2237 sram_buff
= ssi_sram_mgr_alloc(drvdata
, sram_size_to_alloc
);
2238 if (sram_buff
== NULL_SRAM_ADDR
) {
2239 dev_err(dev
, "SRAM pool exhausted\n");
2244 /* The initial digest-len offset */
2245 hash_handle
->digest_len_sram_addr
= sram_buff
;
2247 /*must be set before the alg registration as it is being used there*/
2248 rc
= ssi_hash_init_sram_digest_consts(drvdata
);
2249 if (unlikely(rc
!= 0)) {
2250 dev_err(dev
, "Init digest CONST failed (rc=%d)\n", rc
);
2254 /* ahash registration */
2255 for (alg
= 0; alg
< ARRAY_SIZE(driver_hash
); alg
++) {
2256 struct ssi_hash_alg
*t_alg
;
2257 int hw_mode
= driver_hash
[alg
].hw_mode
;
2259 /* register hmac version */
2260 t_alg
= ssi_hash_create_alg(&driver_hash
[alg
], dev
, true);
2261 if (IS_ERR(t_alg
)) {
2262 rc
= PTR_ERR(t_alg
);
2263 dev_err(dev
, "%s alg allocation failed\n",
2264 driver_hash
[alg
].driver_name
);
2267 t_alg
->drvdata
= drvdata
;
2269 rc
= crypto_register_ahash(&t_alg
->ahash_alg
);
2271 dev_err(dev
, "%s alg registration failed\n",
2272 driver_hash
[alg
].driver_name
);
2276 list_add_tail(&t_alg
->entry
,
2277 &hash_handle
->hash_list
);
2280 if ((hw_mode
== DRV_CIPHER_XCBC_MAC
) ||
2281 (hw_mode
== DRV_CIPHER_CMAC
))
2284 /* register hash version */
2285 t_alg
= ssi_hash_create_alg(&driver_hash
[alg
], dev
, false);
2286 if (IS_ERR(t_alg
)) {
2287 rc
= PTR_ERR(t_alg
);
2288 dev_err(dev
, "%s alg allocation failed\n",
2289 driver_hash
[alg
].driver_name
);
2292 t_alg
->drvdata
= drvdata
;
2294 rc
= crypto_register_ahash(&t_alg
->ahash_alg
);
2296 dev_err(dev
, "%s alg registration failed\n",
2297 driver_hash
[alg
].driver_name
);
2301 list_add_tail(&t_alg
->entry
, &hash_handle
->hash_list
);
2308 kfree(drvdata
->hash_handle
);
2309 drvdata
->hash_handle
= NULL
;
2313 int ssi_hash_free(struct ssi_drvdata
*drvdata
)
2315 struct ssi_hash_alg
*t_hash_alg
, *hash_n
;
2316 struct ssi_hash_handle
*hash_handle
= drvdata
->hash_handle
;
2319 list_for_each_entry_safe(t_hash_alg
, hash_n
, &hash_handle
->hash_list
, entry
) {
2320 crypto_unregister_ahash(&t_hash_alg
->ahash_alg
);
2321 list_del(&t_hash_alg
->entry
);
2326 drvdata
->hash_handle
= NULL
;
2331 static void ssi_hash_create_xcbc_setup(struct ahash_request
*areq
,
2332 struct cc_hw_desc desc
[],
2333 unsigned int *seq_size
)
2335 unsigned int idx
= *seq_size
;
2336 struct ahash_req_ctx
*state
= ahash_request_ctx(areq
);
2337 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
2338 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
2340 /* Setup XCBC MAC K1 */
2341 hw_desc_init(&desc
[idx
]);
2342 set_din_type(&desc
[idx
], DMA_DLLI
, (ctx
->opad_tmp_keys_dma_addr
+
2343 XCBC_MAC_K1_OFFSET
),
2344 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
);
2345 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
2346 set_cipher_mode(&desc
[idx
], DRV_CIPHER_XCBC_MAC
);
2347 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2348 set_key_size_aes(&desc
[idx
], CC_AES_128_BIT_KEY_SIZE
);
2349 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2352 /* Setup XCBC MAC K2 */
2353 hw_desc_init(&desc
[idx
]);
2354 set_din_type(&desc
[idx
], DMA_DLLI
, (ctx
->opad_tmp_keys_dma_addr
+
2355 XCBC_MAC_K2_OFFSET
),
2356 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
);
2357 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE1
);
2358 set_cipher_mode(&desc
[idx
], DRV_CIPHER_XCBC_MAC
);
2359 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2360 set_key_size_aes(&desc
[idx
], CC_AES_128_BIT_KEY_SIZE
);
2361 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2364 /* Setup XCBC MAC K3 */
2365 hw_desc_init(&desc
[idx
]);
2366 set_din_type(&desc
[idx
], DMA_DLLI
, (ctx
->opad_tmp_keys_dma_addr
+
2367 XCBC_MAC_K3_OFFSET
),
2368 CC_AES_128_BIT_KEY_SIZE
, NS_BIT
);
2369 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE2
);
2370 set_cipher_mode(&desc
[idx
], DRV_CIPHER_XCBC_MAC
);
2371 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2372 set_key_size_aes(&desc
[idx
], CC_AES_128_BIT_KEY_SIZE
);
2373 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2376 /* Loading MAC state */
2377 hw_desc_init(&desc
[idx
]);
2378 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
2379 CC_AES_BLOCK_SIZE
, NS_BIT
);
2380 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
2381 set_cipher_mode(&desc
[idx
], DRV_CIPHER_XCBC_MAC
);
2382 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2383 set_key_size_aes(&desc
[idx
], CC_AES_128_BIT_KEY_SIZE
);
2384 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2389 static void ssi_hash_create_cmac_setup(struct ahash_request
*areq
,
2390 struct cc_hw_desc desc
[],
2391 unsigned int *seq_size
)
2393 unsigned int idx
= *seq_size
;
2394 struct ahash_req_ctx
*state
= ahash_request_ctx(areq
);
2395 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
2396 struct ssi_hash_ctx
*ctx
= crypto_ahash_ctx(tfm
);
2398 /* Setup CMAC Key */
2399 hw_desc_init(&desc
[idx
]);
2400 set_din_type(&desc
[idx
], DMA_DLLI
, ctx
->opad_tmp_keys_dma_addr
,
2401 ((ctx
->key_params
.keylen
== 24) ? AES_MAX_KEY_SIZE
:
2402 ctx
->key_params
.keylen
), NS_BIT
);
2403 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
2404 set_cipher_mode(&desc
[idx
], DRV_CIPHER_CMAC
);
2405 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2406 set_key_size_aes(&desc
[idx
], ctx
->key_params
.keylen
);
2407 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2410 /* Load MAC state */
2411 hw_desc_init(&desc
[idx
]);
2412 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
2413 CC_AES_BLOCK_SIZE
, NS_BIT
);
2414 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
2415 set_cipher_mode(&desc
[idx
], DRV_CIPHER_CMAC
);
2416 set_cipher_config0(&desc
[idx
], DESC_DIRECTION_ENCRYPT_ENCRYPT
);
2417 set_key_size_aes(&desc
[idx
], ctx
->key_params
.keylen
);
2418 set_flow_mode(&desc
[idx
], S_DIN_to_AES
);
2423 static void ssi_hash_create_data_desc(struct ahash_req_ctx
*areq_ctx
,
2424 struct ssi_hash_ctx
*ctx
,
2425 unsigned int flow_mode
,
2426 struct cc_hw_desc desc
[],
2427 bool is_not_last_data
,
2428 unsigned int *seq_size
)
2430 unsigned int idx
= *seq_size
;
2431 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
2433 if (likely(areq_ctx
->data_dma_buf_type
== SSI_DMA_BUF_DLLI
)) {
2434 hw_desc_init(&desc
[idx
]);
2435 set_din_type(&desc
[idx
], DMA_DLLI
,
2436 sg_dma_address(areq_ctx
->curr_sg
),
2437 areq_ctx
->curr_sg
->length
, NS_BIT
);
2438 set_flow_mode(&desc
[idx
], flow_mode
);
2441 if (areq_ctx
->data_dma_buf_type
== SSI_DMA_BUF_NULL
) {
2442 dev_dbg(dev
, " NULL mode\n");
2443 /* nothing to build */
2447 hw_desc_init(&desc
[idx
]);
2448 set_din_type(&desc
[idx
], DMA_DLLI
,
2449 areq_ctx
->mlli_params
.mlli_dma_addr
,
2450 areq_ctx
->mlli_params
.mlli_len
, NS_BIT
);
2451 set_dout_sram(&desc
[idx
], ctx
->drvdata
->mlli_sram_addr
,
2452 areq_ctx
->mlli_params
.mlli_len
);
2453 set_flow_mode(&desc
[idx
], BYPASS
);
2456 hw_desc_init(&desc
[idx
]);
2457 set_din_type(&desc
[idx
], DMA_MLLI
,
2458 ctx
->drvdata
->mlli_sram_addr
,
2459 areq_ctx
->mlli_nents
, NS_BIT
);
2460 set_flow_mode(&desc
[idx
], flow_mode
);
2463 if (is_not_last_data
)
2464 set_din_not_last_indication(&desc
[(idx
- 1)]);
2465 /* return updated desc sequence size */
2470 * Gets the address of the initial digest in SRAM
2471 * according to the given hash mode
2474 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
2476 * \return u32 The address of the inital digest in SRAM
2478 ssi_sram_addr_t
ssi_ahash_get_larval_digest_sram_addr(void *drvdata
, u32 mode
)
2480 struct ssi_drvdata
*_drvdata
= (struct ssi_drvdata
*)drvdata
;
2481 struct ssi_hash_handle
*hash_handle
= _drvdata
->hash_handle
;
2482 struct device
*dev
= drvdata_to_dev(_drvdata
);
2488 return (hash_handle
->larval_digest_sram_addr
);
2490 return (hash_handle
->larval_digest_sram_addr
+
2492 case DRV_HASH_SHA224
:
2493 return (hash_handle
->larval_digest_sram_addr
+
2496 case DRV_HASH_SHA256
:
2497 return (hash_handle
->larval_digest_sram_addr
+
2500 sizeof(sha224_init
));
2501 #if (DX_DEV_SHA_MAX > 256)
2502 case DRV_HASH_SHA384
:
2503 return (hash_handle
->larval_digest_sram_addr
+
2506 sizeof(sha224_init
) +
2507 sizeof(sha256_init
));
2508 case DRV_HASH_SHA512
:
2509 return (hash_handle
->larval_digest_sram_addr
+
2512 sizeof(sha224_init
) +
2513 sizeof(sha256_init
) +
2514 sizeof(sha384_init
));
2517 dev_err(dev
, "Invalid hash mode (%d)\n", mode
);
2520 /*This is valid wrong value to avoid kernel crash*/
2521 return hash_handle
->larval_digest_sram_addr
;
2525 ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata
, u32 mode
)
2527 struct ssi_drvdata
*_drvdata
= (struct ssi_drvdata
*)drvdata
;
2528 struct ssi_hash_handle
*hash_handle
= _drvdata
->hash_handle
;
2529 ssi_sram_addr_t digest_len_addr
= hash_handle
->digest_len_sram_addr
;
2533 case DRV_HASH_SHA224
:
2534 case DRV_HASH_SHA256
:
2536 return digest_len_addr
;
2537 #if (DX_DEV_SHA_MAX > 256)
2538 case DRV_HASH_SHA384
:
2539 case DRV_HASH_SHA512
:
2540 return digest_len_addr
+ sizeof(digest_len_init
);
2543 return digest_len_addr
; /*to avoid kernel crash*/