/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"
#ifdef CC_DEBUG
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};
struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};
struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
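/*
 * How the buffer manager hands data to the CryptoCell DMA (informal
 * summary; the authoritative entry layout lives in cc_lli_defs.h): a
 * single contiguous mapping is programmed directly as a DLLI
 * descriptor, while fragmented data is described by an MLLI table -
 * an array of LLI entries (DMA address + size pairs) that the engine
 * walks as a linked list. struct buffer_array merely stages up to
 * MAX_NUM_OF_BUFFERS_IN_MLLI sources (raw DMA buffers or mapped SGLs)
 * until ssi_buffer_mgr_generate_mlli() renders them into one table;
 * mlli_nents[i] points back into the request context so each consumer
 * learns how many entries its portion occupies.
 */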
/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set when a chained SGL is detected (may be NULL)
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes,
	bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry in sg (entry =0x%X)\n",
				    nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
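/*
 * Worked example (illustrative): for an SGL of 100-, 50- and 200-byte
 * entries and nbytes = 300, the walk counts nents = 3 and leaves
 * *lbytes = 150 - only 150 of the last entry's 200 bytes belong to
 * the request, which is exactly what the ICV-placement logic further
 * down needs to know.
 */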
/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
 *
 * @sgl: SG list
 * @data_len: Number of bytes to zero, starting at the head of the list
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatterlist data,
 * from to_skip to end, to dest and vice versa
 *
 * @dest: Linear buffer side of the copy
 * @sg: SG list side of the copy
 * @to_skip: Byte offset into the SG list at which to start
 * @end: Last byte offset (inclusive) to copy
 * @direct: SSI_SG_TO_BUF or SSI_SG_FROM_BUF
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	u32 to_skip, u32 end,
	enum ssi_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
	u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
			      *curr_nents,
			      mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
		      *curr_nents,
		      mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
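/*
 * Sizing example (illustrative, taking CC_MAX_MLLI_ENTRY_SIZE as the
 * 64 KB the comment above refers to): a single 150 KB contiguous
 * buffer is rendered as three LLI entries - 64 KB + 64 KB + 22 KB.
 * Each entry is a pair of 32-bit words (address and size, written via
 * cc_lli_set_addr()/cc_lli_set_size()), which is why mlli_entry_p
 * advances by 2 per entry and why the overflow check above reserves
 * buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1 slots.
 */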
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset,
	u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
				sg_dma_len(curr_sgl) - sglOffset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sglOffset, entry_data_len,
			curr_nents, &mlli_entry_p);
		if (rc != 0)
			return rc;
		sglOffset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&mlli_params->mlli_dma_addr);
	if (unlikely(!mlli_params->mlli_virt_addr)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i],
				sg_data->offset[i], &total_nents, &mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*(sg_data->mlli_nents[i]) +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
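/*
 * Typical lifetime of one table (a sketch of how the helpers here fit
 * together): a mapping routine zeroes sg_data.num_of_buffers, stages
 * sources with ssi_buffer_mgr_add_{buffer,scatterlist}_entry(), points
 * mlli_params->curr_pool at the manager's DMA pool, and finally calls
 * ssi_buffer_mgr_generate_mlli() to render everything into a single
 * table allocated from that pool.
 */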
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len,
		      is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
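/*
 * Note on the mlli_nents back-pointer (both staging helpers above):
 * it is zeroed at staging time and only accumulated later by
 * ssi_buffer_mgr_generate_mlli(), so a caller can stage several
 * sources against the same counter - e.g. chaining the IV entry right
 * after the assoc table, as the AEAD path below does - and read the
 * combined entry count once the table is built.
 */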
static int ssi_buffer_mgr_dma_map_sg(struct device *dev,
				     struct scatterlist *sg, u32 nents,
				     enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	u32 *nents, u32 max_sg_nents,
	u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg_page(sg), sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else { /* !sg_is_last */
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			*nents = 0;
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/* In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
								  sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
static inline int ssi_aead_handle_config_buf(struct device *dev,
					     struct aead_req_ctx *areq_ctx,
					     u8 *config_data,
					     struct buffer_array *sg_data,
					     unsigned int assoclen)
{
	SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
		      sg_page(&areq_ctx->ccm_adata_sg),
		      sg_virt(&areq_ctx->ccm_adata_sg),
		      areq_ctx->ccm_adata_sg.offset,
		      areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
						     &areq_ctx->ccm_adata_sg,
						     (AES_BLOCK_SIZE +
						      areq_ctx->ccm_hdr_size),
						     0, false, NULL);
	}
	return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    u8 *curr_buff,
					    u32 curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      sg_page(areq_ctx->buff_sg),
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}
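/*
 * Both small-buffer helpers above wrap a driver-owned staging buffer
 * (the CCM config block, or the hash residue buffer) in a
 * single-entry scatterlist so the exact same DLLI/MLLI machinery can
 * consume it alongside the caller-supplied SGLs; sg_init_one() plus a
 * one-entry dma_map_sg() is all that takes.
 */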
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				 DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents,
		     DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
		      sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents,
			     DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
			      sg_virt(dst));
	}
}
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
				    ivsize, info);
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL,
					    &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents,
							     src, nbytes, 0,
							     true,
							     &req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents,
							     src, nbytes, 0,
							     true,
							     &req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->out_nents,
							     dst, nbytes, 0,
							     true,
							     &req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
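/*
 * DLLI/MLLI promotion rule used above (and mirrored by the hash path
 * further down): the request starts out as SSI_DMA_BUF_DLLI and is
 * promoted to SSI_DMA_BUF_MLLI as soon as either side maps to more
 * than one entry; once promoted, both src and dst are staged into
 * sg_data so a single table describes the whole transfer.
 */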
void ssi_buffer_mgr_unmap_aead_request(
	struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}
#endif /* SSI_CC_HAS_AES_GCM */

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		      sg_virt(req->src), areq_ctx->src.nents,
		      areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap,
						  &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (unlikely(req->src != req->dst)) {
		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
			      sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     ssi_buffer_mgr_get_sgl_nents(req->dst,
							  size_to_unmap,
							  &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (areq_ctx->is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy the mac back from its temporary location to deal with
		 * possible data memory overwrite caused by cache coherency
		 * problems.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
	}
}
static inline int ssi_buffer_mgr_get_aead_icv_nents(
	struct scatterlist *sgl,
	unsigned int sgl_nents,
	unsigned int authsize,
	u32 last_entry_data_size,
	bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					 (authsize - last_entry_data_size) :
					 authsize;
	int nents = 0;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED); i++) {
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		nents = 1; /* ICV placed in whole last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
			    MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /* unsupported */
	}
	SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
		      (*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}
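/*
 * Decision table (illustrative, authsize = 16): if the last entry
 * carries more than 16 data bytes the ICV sits wholly inside it
 * (nents = 0, contiguous); exactly 16 bytes means the last entry is
 * the ICV (nents = 1, contiguous); fewer than 16 means the ICV
 * straddles the last two entries and is fragmented - the callers then
 * fall back to mac_buf/backup_mac and a CPU compare instead of
 * pointing the engine at a split ICV.
 */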
static inline int ssi_buffer_mgr_aead_chain_iv(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = &drvdata->plat_dev->dev;
	int rc = 0;

	if (unlikely(!req->iv)) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
			    hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
		      hw_iv_size, req->iv,
		      (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
	/* TODO: what about CTR? */
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		ssi_buffer_mgr_add_buffer_entry(
			sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
			iv_size_to_authenc, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_assoc(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (unlikely(req->assoclen == 0)) {
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	/* Iterate over the sgl to see how many entries are for associated
	 * data; it is assumed that if we reach this point, the sgl is
	 * already mapped.
	 */
	sg_index = current_sg->length;
	if (sg_index > size_of_assoc) {
		/* The first entry in the scatterlist contains all the
		 * associated data.
		 */
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* If we reached the end of the sgl, this is
			 * unexpected.
			 */
			if (!current_sg) {
				SSI_LOG_ERR("reached end of sg list. unexpected\n");
				BUG();
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (unlikely((mapped_nents + 1) >
			     LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
			SSI_LOG_ERR("CCM case. Too many fragments. Current %d max %d\n",
				    (areq_ctx->assoc.nents + 1),
				    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (likely(mapped_nents == 1) &&
	    (areq_ctx->ccm_hdr_size == ccm_header_size_null))
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;

	if (unlikely((do_chain) ||
		     (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
		SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		ssi_buffer_mgr_add_scatterlist_entry(
			sg_data, areq_ctx->assoc.nents,
			req->src, req->assoclen, 0, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
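/*
 * Assoc-data walk (informal): req->src holds the assoc data followed
 * by the payload, so the loop above only counts how many
 * already-mapped entries the first size_of_assoc bytes span (gcm4543
 * prepends the IV to that region). A single entry with no CCM config
 * block stays DLLI; anything else is staged for the MLLI table.
 */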
static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
	struct aead_request *req,
	u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (likely(req->src == req->dst)) {
		/* INPLACE */
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else {
		/* NON-INPLACE and ENCRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
	}
}
static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	u32 *src_last_bytes, u32 *dst_last_bytes,
	bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	if (likely(req->src == req->dst)) {
		/* INPLACE */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
			areq_ctx->srcSgl, areq_ctx->src.nents, authsize,
			*src_last_bytes, &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				if (!drvdata->coherent) {
					/* Coherent platforms (e.g. ACP)
					 * already copied the ICV for any
					 * INPLACE-DECRYPT operation, hence
					 * this code must be skipped there.
					 */
					u32 skip = req->assoclen;

					if (areq_ctx->is_gcm4543)
						skip += crypto_aead_ivsize(tfm);

					ssi_buffer_mgr_copy_scatterlist_portion(
						areq_ctx->backup_mac, req->src,
						(skip + req->cryptlen -
						 areq_ctx->req_authsize),
						skip + req->cryptlen,
						SSI_SG_TO_BUF);
				}
				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			/* Should handle the case where the sg is not
			 * contiguous.
			 */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents,
						     areq_ctx->dstSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
			areq_ctx->srcSgl, areq_ctx->src.nents, authsize,
			*src_last_bytes, &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			u32 size_to_skip = req->assoclen;

			if (areq_ctx->is_gcm4543)
				size_to_skip += crypto_aead_ivsize(tfm);

			ssi_buffer_mgr_copy_scatterlist_portion(
				areq_ctx->backup_mac, req->src,
				size_to_skip + req->cryptlen -
					areq_ctx->req_authsize,
				size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			/* Should handle the case where the sg is not
			 * contiguous.
			 */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents,
						     areq_ctx->dstSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
			areq_ctx->dstSgl, areq_ctx->dst.nents, authsize,
			*dst_last_bytes, &areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (likely(!areq_ctx->is_icv_fragmented)) {
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_data(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = &drvdata->plat_dev->dev;
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	u32 src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_data_exit;
	}
	areq_ctx->srcSgl = req->src;
	areq_ctx->dstSgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map,
							&src_last_bytes,
							&chained);
	sg_index = areq_ctx->srcSgl->length;
	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->srcSgl->length;
		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		/* if we reached the end of the sgl, this is unexpected */
		if (!areq_ctx->srcSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->srcSgl->length;
		src_mapped_nents--;
	}
	if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->srcOffset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst,
						    size_for_map,
						    DMA_BIDIRECTIONAL,
						    &areq_ctx->dst.nents,
						    LLI_MAX_NUM_OF_DATA_ENTRIES,
						    &dst_last_bytes,
						    &dst_mapped_nents);
		if (unlikely(rc != 0)) {
			rc = -ENOMEM;
			goto chain_data_exit;
		}
	}

	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map,
							&dst_last_bytes,
							&chained);
	sg_index = areq_ctx->dstSgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dstSgl->length;
		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
		/* if we reached the end of the sgl, this is unexpected */
		if (!areq_ctx->dstSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->dstSgl->length;
		dst_mapped_nents--;
	}
	if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dstOffset = offset;
	if ((src_mapped_nents > 1) ||
	    (dst_mapped_nents > 1) ||
	    do_chain) {
		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
							   sg_data,
							   &src_last_bytes,
							   &dst_last_bytes,
							   is_last_table);
	} else {
		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
		ssi_buffer_mgr_prepare_aead_data_dlli(
			req, &src_last_bytes, &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
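/*
 * Offset bookkeeping (informal): size_to_skip is the assoc region
 * (plus the transplanted IV for gcm4543) that the data MLLI must not
 * describe, so the two walks above advance srcSgl/dstSgl past it and
 * record the byte offset into the first data-carrying entry in
 * srcOffset/dstOffset; render_scatterlist_to_mlli() consumes that
 * offset only for the first entry it renders.
 */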
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
						  struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
							curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
						curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
						curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
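/*
 * Resulting SRAM layout (derived from the assignments above), with
 * the single MLLI table copied to drvdata->mlli_sram_addr:
 *
 *   inplace:              [assoc][src == dst]
 *   non-inplace decrypt:  [assoc][src][dst]
 *   non-inplace encrypt:  [assoc][dst][src]
 *
 * Each region starts LLI_ENTRY_BYTE_SIZE * <previous region's
 * mlli_nents> past the previous one; in the double-pass flow the
 * assoc entry count also absorbs the data entry count so a single
 * descriptor can cover assoc + data.
 */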
int ssi_buffer_mgr_map_aead_request(
	struct ssi_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;

	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	if (drvdata->coherent &&
	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overwrite caused by cache coherency problems.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
	}

	/* calculate the size for cipher; remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
						    areq_ctx->mac_buf,
						    MAX_MAC_SIZE,
						    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
			    MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
			(areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
			AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->ccm_iv0_dma_addr))) {
			SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE,
				    (areq_ctx->ccm_config +
				     CCM_CTR_COUNT_0_OFFSET));
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		if (ssi_aead_handle_config_buf(dev, areq_ctx,
					       areq_ctx->ccm_config, &sg_data,
					       req->assoclen) != 0) {
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		areq_ctx->hkey_dma_addr = dma_map_single(dev,
							 areq_ctx->hkey,
							 AES_BLOCK_SIZE,
							 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE,
			DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_block_len_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
							areq_ctx->gcm_iv_inc1,
							AES_BLOCK_SIZE,
							DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_iv_inc1_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
							areq_ctx->gcm_iv_inc2,
							AES_BLOCK_SIZE,
							DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_iv_inc2_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}
#endif /* SSI_CC_HAS_AES_GCM */

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
					    size_to_map, DMA_BIDIRECTIONAL,
					    &areq_ctx->src.nents,
					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (likely(areq_ctx->is_single_pass)) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 * Note: IV is contig. buffer (not an SGL)
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data,
						     true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data,
						  true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data,
						    true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data,
						     false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data,
						  false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data,
						    true, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (unlikely(
		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto aead_map_failure;

		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
		SSI_LOG_DEBUG("assoc params mn %d\n",
			      areq_ctx->assoc.mlli_nents);
		SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
		SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	ssi_buffer_mgr_unmap_aead_request(dev, req);
	return rc;
}
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    nbytes,
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy,
							    &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/* build MLLI */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src, nbytes, 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
			areq_ctx->buff1;
	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src,
						     nbytes,
						     &dummy, NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		/* data was only buffered; no HW operation is needed */
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u residue %u\n",
			      next_buff,
			      (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
							(update_data_len -
							 *curr_buff_cnt),
							nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    (update_data_len -
							     *curr_buff_cnt),
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy,
							    &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src,
						     (update_data_len -
						      *curr_buff_cnt), 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
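/*
 * Residue arithmetic example (illustrative; block_size is a power of
 * two here, so the AND works as a modulo): with block_size = 64,
 * *curr_buff_cnt = 10 and nbytes = 100, total_in_len = 110, so
 * *next_buff_cnt = 110 & 63 = 46 bytes are copied aside for the next
 * update and update_data_len = 64 bytes are actually hashed now.
 */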
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src,
	bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
						&areq_ctx->buff1_cnt;

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
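/*
 * Pool sizing note: every buffer handed out by mlli_buffs_pool is
 * large enough for a maximal table (MAX_NUM_OF_TOTAL_MLLI_ENTRIES
 * entries of LLI_ENTRY_BYTE_SIZE bytes each), which is also the limit
 * ssi_buffer_mgr_render_buff_to_mlli() enforces before writing
 * entries - so one pool allocation can never overflow.
 */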
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}