/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "ssi_config.h"
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/ctr.h>
#ifdef FLUSH_CACHE_ALL
#include <asm/cacheflush.h>
#endif
#include <linux/pm_runtime.h>
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_pm.h" /* ssi_power_mgr_* helpers used below */
#include "ssi_fips_local.h"

#define SSI_MAX_POLL_ITER	10
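
/* SSI_MAX_POLL_ITER bounds the DSCRPTR_QUEUE_CONTENT polling loop in
 * request_mgr_queues_status_check() while waiting for free HW queue slots.
 */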

struct ssi_request_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;
	struct cc_hw_desc monitor_desc;
	volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	bool is_runtime_suspended;
#endif
};
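
/* req_queue is used as a power-of-two ring buffer: head == tail means
 * empty, ((head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == tail means full,
 * so one slot always stays unused. All of the index arithmetic below
 * depends on MAX_REQUEST_QUEUE_SIZE being a power of two.
 */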

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

void request_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma != 0) {
		dma_free_coherent(&drvdata->plat_dev->dev,
				  sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						 req_mgr_h->min_free_hw_slots));
	SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int request_mgr_init(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h;
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
	SSI_LOG_DEBUG("Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (unlikely(!req_mgr_h->workq)) {
		SSI_LOG_ERR("Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	SSI_LOG_DEBUG("Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
			CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
	SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
			    req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(&drvdata->plat_dev->dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped buffer\n",
			    sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(&req_mgr_h->compl_desc);
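
	/* The dummy descriptor is a BYPASS write of a zero word into
	 * dummy_comp_buff. send_request() appends it whenever the caller
	 * supplied no DOUT descriptor, so that the AXI monitor still
	 * observes a completion for the sequence.
	 */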

	return 0;

req_mgr_init_err:
	request_mgr_fini(drvdata);
	return rc;
}

static inline void enqueue_seq(
	void __iomem *cc_base,
	struct cc_hw_desc seq[], unsigned int seq_len)
{
	unsigned int i;

	for (i = 0; i < seq_len; i++) {
		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		/* Ensure words 0-4 reach the device before the final word,
		 * which completes the descriptor.
		 */
		wmb();
		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
		SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
			      i, seq[i].word[0], seq[i].word[1],
			      seq[i].word[2], seq[i].word[3],
			      seq[i].word[4], seq[i].word[5]);
#endif
	}
}
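
/* Note: all six descriptor words above are written to the same
 * DSCRPTR_QUEUE_WORD0 offset - the HW descriptor queue is exposed as a
 * single pushed-word window rather than six separate word registers.
 */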

/*!
 * Completion will take place if and only if user requested completion
 * by setting "is_dout = 0" in send_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 void __iomem *cc_base)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

static inline int request_mgr_queues_status_check(
		struct ssi_request_mgr_handle *req_mgr_h,
		void __iomem *cc_base,
		unsigned int total_seq_len)
{
	unsigned long poll_queue;

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread.
	 */
	if (unlikely(((req_mgr_h->req_queue_head + 1) &
		      (MAX_REQUEST_QUEUE_SIZE - 1)) ==
		     req_mgr_h->req_queue_tail)) {
		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			    req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -EBUSY;
	}

	if (likely(req_mgr_h->q_free_slots >= total_seq_len))
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
							   DSCRPTR_QUEUE_CONTENT));
		if (unlikely(req_mgr_h->q_free_slots <
			     req_mgr_h->min_free_hw_slots)) {
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
		}

		if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
			/* There is enough space - return */
			return 0;
		}

		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			      req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue - try again later */
	SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		      req_mgr_h->req_queue_head,
		      MAX_REQUEST_QUEUE_SIZE,
		      req_mgr_h->q_free_slots,
		      total_seq_len);
	return -EAGAIN;
}
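
/* Callers treat the two failure modes differently: -EBUSY (SW FIFO full)
 * propagates out of send_request(), while -EAGAIN (HW queue full) makes
 * send_request() sleep briefly and retry.
 */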

/*!
 * Enqueue caller request to crypto hardware.
 *
 * \param drvdata
 * \param ssi_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param is_dout If "true": completion is handled by the caller
 *	  If "false": this function adds a dummy descriptor completion
 *	  and waits upon completion signal.
 *
 * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
 */
int send_request(
	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
	struct cc_hw_desc *desc, unsigned int len, bool is_dout)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
	int rc;
	unsigned int max_required_seq_len =
		(total_seq_len +
		 ((ssi_req->ivgen_dma_addr_len == 0) ? 0 : SSI_IVPOOL_SEQ_LEN) +
		 ((is_dout == 0) ? 1 : 0));

#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
	if (rc != 0) {
		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}
#endif

	do {
		spin_lock_bh(&req_mgr_h->hw_lock);

		/* Check if there is enough space in the SW/HW queues:
		 * in case of IV generation add the max size, and in case of
		 * no dout add 1 for the internal completion descriptor.
		 */
		rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
						     max_required_seq_len);
		if (likely(rc == 0))
			/* There is enough space in the queue */
			break;
		/* Something went wrong - release the spinlock */
		spin_unlock_bh(&req_mgr_h->hw_lock);

		if (rc != -EAGAIN) {
			/* Any error other than HW queue full
			 * (SW queue is full)
			 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		/* HW queue is full - short sleep */
		msleep(1);
	} while (1);

	/* An additional completion descriptor is needed in case the caller
	 * did not enable any DLLI/MLLI DOUT bit in the given sequence.
	 */
	if (!is_dout) {
		init_completion(&ssi_req->seq_compl);
		ssi_req->user_cb = request_mgr_complete;
		ssi_req->user_arg = &ssi_req->seq_compl;
		total_seq_len++;
	}

	if (ssi_req->ivgen_dma_addr_len > 0) {
		SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
			      ssi_req->ivgen_dma_addr_len,
			      (unsigned long long)ssi_req->ivgen_dma_addr[0],
			      (unsigned long long)ssi_req->ivgen_dma_addr[1],
			      (unsigned long long)ssi_req->ivgen_dma_addr[2],
			      ssi_req->ivgen_size);

		/* Acquire IV from pool */
		rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
				     ssi_req->ivgen_dma_addr_len,
				     ssi_req->ivgen_size, iv_seq, &iv_seq_len);

		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
			spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);

#ifdef FLUSH_CACHE_ALL
	flush_cache_all();
#endif

	/* STAT_PHASE_4: Push sequence */
	enqueue_seq(cc_base, iv_seq, iv_seq_len);
	enqueue_seq(cc_base, desc, len);
	enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));

	if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
		/* This means that there was a problem with the resume */
		BUG();
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	spin_unlock_bh(&req_mgr_h->hw_lock);

	if (!is_dout) {
		/* Wait upon sequence completion.
		 * Return "0" - operation done successfully.
		 */
		wait_for_completion(&ssi_req->seq_compl);
		return 0;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}
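
/* Typical call pattern (illustrative sketch only; descriptor setup is
 * driver-specific and my_cb/ctx below are hypothetical names):
 *
 *	struct ssi_crypto_req req = { .user_cb = my_cb, .user_arg = ctx };
 *
 *	rc = send_request(drvdata, &req, desc, seq_len, true);
 *	// rc == -EINPROGRESS: my_cb() is invoked from proc_completions()
 *	// once the AXI monitor reports the sequence complete.
 */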

/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(
	struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
	rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
	if (unlikely(rc != 0))
		return rc;

	set_queue_last_ind(&desc[(len - 1)]);

	enqueue_seq(cc_base, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
						   DSCRPTR_QUEUE_CONTENT));

	return 0;
}
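
/* Unlike send_request(), this init-time variant bypasses the SW request
 * queue, the completion callbacks and runtime PM: it is intended for
 * one-off sequences pushed before interrupt-driven completion is in use.
 */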

void complete_request(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}
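
/* complete_request() is the ISR-side hook for deferring completion
 * processing out of hard-IRQ context, to either the workqueue or the
 * tasklet depending on COMP_IN_WQ.
 */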

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct ssi_drvdata *drvdata =
		container_of(work, struct ssi_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct ssi_drvdata *drvdata)
{
	struct ssi_crypto_req *ssi_req;
	struct platform_device *plat_dev = drvdata->plat_dev;
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	int rc = 0;
#endif

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (unlikely(request_mgr_handle->req_queue_head ==
			     request_mgr_handle->req_queue_tail)) {
			SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n",
				    request_mgr_handle->req_queue_head);
			BUG();
		}

		ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];

#ifdef FLUSH_CACHE_ALL
		flush_cache_all();
#endif

#ifdef COMPLETION_DELAY
		/* Delay */
		{
			u32 axi_err;
			int i;

			SSI_LOG_INFO("Delay\n");
			for (i = 0; i < 1000000; i++)
				axi_err = READ_REGISTER(drvdata->cc_base +
					CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
		}
#endif /* COMPLETION_DELAY */

		if (likely(ssi_req->user_cb))
			ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg,
					 drvdata->cc_base);
		request_mgr_handle->req_queue_tail =
			(request_mgr_handle->req_queue_tail + 1) &
			(MAX_REQUEST_QUEUE_SIZE - 1);
		SSI_LOG_DEBUG("Dequeue request tail=%u\n",
			      request_mgr_handle->req_queue_tail);
		SSI_LOG_DEBUG("Request completed. axi_completed=%d\n",
			      request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
		rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
		if (rc != 0)
			SSI_LOG_ERR("Failed to set runtime suspension %d\n",
				    rc);
#endif
	}
}

static inline u32 cc_axi_comp_count(void __iomem *cc_base)
{
	/* The CC_HAL_READ_REGISTER macro implicitly requires and uses
	 * a base MMIO register address variable named cc_base.
	 */
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	u32 irq;

	irq = (drvdata->irq & SSI_COMP_IRQ_MASK);

	if (irq & SSI_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it,
		 * we clear it now.
		 */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR),
				      SSI_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter
		 * once more.
		 */
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(cc_base);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is 0.
				 */
				request_mgr_handle->axi_completed =
						cc_axi_comp_count(cc_base);
			} while (request_mgr_handle->axi_completed > 0);

			/* To avoid the interrupt from firing as we unmask
			 * it, we clear it now.
			 */
			CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR),
					      SSI_COMP_IRQ_MASK);

			/* Avoid race with above clear: Test completion
			 * counter once more.
			 */
			request_mgr_handle->axi_completed +=
					cc_axi_comp_count(cc_base);
		}
	}
	/* After verifying that there is nothing to do, unmask the AXI
	 * completion interrupt.
	 */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
			      CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF,
								 HOST_IMR)) &
			      ~irq);
}

/*
 * Resume the queue configuration - no need to take the lock as this happens
 * inside the spin lock protection.
 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * Suspend the queue configuration. Since it is used for runtime suspend
 * only, just verify that the queue can be suspended.
 */
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */