/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "ssi_config.h"
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/ctr.h>
#ifdef FLUSH_CACHE_ALL
#include <asm/cacheflush.h>
#endif
#include <linux/pm_runtime.h>
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_fips_local.h"

#define SSI_MAX_POLL_ITER 10

#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
#ifdef CC_CYCLE_COUNT

#define MONITOR_CNTR_BIT 0

/*
 * Monitor descriptor.
 * Used to measure CC performance.
 */
#define INIT_CC_MONITOR_DESC(desc_p) \
do { \
	HW_DESC_INIT(desc_p); \
	HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
} while (0)

/*
 * Try adding monitor descriptor BEFORE enqueuing sequence.
 */
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
do { \
	if (!test_and_set_bit(MONITOR_CNTR_BIT, (lock_p))) { \
		enqueue_seq((cc_base_addr), (desc_p), 1); \
		*(is_monitored_p) = true; \
	} else { \
		*(is_monitored_p) = false; \
	} \
} while (0)

/*
 * If CC_CYCLE_DESC_HEAD was successfully added:
 * 1. Add memory barrier descriptor to ensure last AXI transaction.
 * 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
 */
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
do { \
	if ((is_monitored) == true) { \
		HwDesc_s barrier_desc; \
		HW_DESC_INIT(&barrier_desc); \
		HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
		HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
		enqueue_seq((cc_base_addr), &barrier_desc, 1); \
		enqueue_seq((cc_base_addr), (desc_p), 1); \
	} \
} while (0)

/*
 * Try reading CC monitor counter value upon sequence complete.
 * Can only succeed if the lock_p is taken by the owner of the given request.
 */
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
do { \
	uint32_t elapsed_cycles; \
	if ((is_monitored) == true) { \
		elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
		clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
		if (elapsed_cycles > 0) \
			update_cc_stat(stat_op_type, stat_phase, (elapsed_cycles - monitor_null_cycles)); \
	} \
} while (0)

#else /*CC_CYCLE_COUNT*/

#define INIT_CC_MONITOR_DESC(desc_p) do { } while (0)
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) do { } while (0)
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) do { } while (0)
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) do { } while (0)

#endif /*CC_CYCLE_COUNT*/
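
/*
 * Illustrative sketch only (not compiled): when CC_CYCLE_COUNT is enabled,
 * the macros above are meant to bracket a descriptor sequence, roughly as
 * send_request() and proc_completions() below do. Names such as "seq" and
 * "seq_len" here are placeholders, not part of this driver:
 *
 *	CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
 *			   &req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);
 *	enqueue_seq(cc_base, seq, seq_len);
 *	CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc,
 *			   ssi_req->is_monitored_p);
 *
 *	(later, upon completion of the request)
 *	END_CC_MONITOR_COUNT(cc_base, op_type, stat_phase,
 *			     drvdata->monitor_null_cycles,
 *			     &req_mgr_h->monitor_lock, ssi_req->is_monitored_p);
 */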
struct ssi_request_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	uint32_t req_queue_head;
	uint32_t req_queue_tail;
	uint32_t axi_completed;
	uint32_t q_free_slots;
	spinlock_t hw_lock;
	HwDesc_s compl_desc;
	uint8_t *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;
	HwDesc_s monitor_desc;
	volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	bool is_runtime_suspended;
#endif
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
void request_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;

	if (req_mgr_h == NULL)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma != 0) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
		dma_free_coherent(&drvdata->plat_dev->dev,
				  sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						 req_mgr_h->min_free_hw_slots));
	SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill completion tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
	kfree(req_mgr_h);

	drvdata->request_mgr_handle = NULL;
}
int request_mgr_init(struct ssi_drvdata *drvdata)
{
#ifdef CC_CYCLE_COUNT
	HwDesc_s monitor_desc[2];
	struct ssi_crypto_req monitor_req = {0};
#endif
	struct ssi_request_mgr_handle *req_mgr_h;
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
	if (req_mgr_h == NULL) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
	SSI_LOG_DEBUG("Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (unlikely(req_mgr_h->workq == NULL)) {
		SSI_LOG_ERR("Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	SSI_LOG_DEBUG("Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
		CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
	SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
			    req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
		sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped buffer\n",
			    sizeof(uint32_t));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
				     sizeof(uint32_t));

	/* Init. "dummy" completion descriptor */
	HW_DESC_INIT(&req_mgr_h->compl_desc);
	HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
	HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
			      req_mgr_h->dummy_comp_buff_dma,
			      sizeof(uint32_t), NS_BIT, 1);
	HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
	HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);

#ifdef CC_CYCLE_COUNT
	/* For CC-HW cycle performance trace */
	INIT_CC_MONITOR_DESC(&req_mgr_h->monitor_desc);
	set_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
	monitor_desc[0] = req_mgr_h->monitor_desc;
	monitor_desc[1] = req_mgr_h->monitor_desc;
	/* Calibrate the monitor's "null" (idle) cycle count */
	rc = send_request(drvdata, &monitor_req, monitor_desc, 2, 0);
	if (unlikely(rc != 0))
		goto req_mgr_init_err;

	drvdata->monitor_null_cycles = READ_REGISTER(drvdata->cc_base +
		CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR));
	SSI_LOG_ERR("Calibration time=0x%08x\n", drvdata->monitor_null_cycles);

	clear_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
#endif

	return 0;

req_mgr_init_err:
	request_mgr_fini(drvdata);
	return rc;
}
static inline void enqueue_seq(
	void __iomem *cc_base,
	HwDesc_s seq[], unsigned int seq_len)
{
	unsigned int i;

	for (i = 0; i < seq_len; i++) {
		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		wmb(); /* order the first five words before pushing the last queue word */
		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			      seq[i].word[0], seq[i].word[1], seq[i].word[2],
			      seq[i].word[3], seq[i].word[4], seq[i].word[5]);
	}
}
/*!
 * Completion will take place if and only if user requested completion
 * by setting "is_dout = 0" in send_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
static inline int request_mgr_queues_status_check(
	struct ssi_request_mgr_handle *req_mgr_h,
	void __iomem *cc_base,
	unsigned int total_seq_len)
{
	unsigned long poll_queue;

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (unlikely(((req_mgr_h->req_queue_head + 1) &
		      (MAX_REQUEST_QUEUE_SIZE - 1)) ==
		     req_mgr_h->req_queue_tail)) {
		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			    req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -EBUSY;
	}

	if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			CC_HAL_READ_REGISTER(
				CC_REG_OFFSET(CRY_KERNEL,
					      DSCRPTR_QUEUE_CONTENT));
		if (unlikely(req_mgr_h->q_free_slots <
			     req_mgr_h->min_free_hw_slots)) {
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
		}

		if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
			/* If there is enough place return */
			return 0;
		}

		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			      req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue - try again later */
	SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
		      "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		      req_mgr_h->req_queue_head,
		      MAX_REQUEST_QUEUE_SIZE,
		      req_mgr_h->q_free_slots,
		      total_seq_len);
	return -EAGAIN;
}
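
/*
 * Illustrative sketch only (not compiled): the return codes above are meant
 * to be consumed in a retry loop of the shape used by send_request() below;
 * the variable names here are placeholders:
 *
 *	do {
 *		spin_lock_bh(&req_mgr_h->hw_lock);
 *		rc = request_mgr_queues_status_check(req_mgr_h, cc_base, seq_len);
 *		if (rc == 0)
 *			break;			(enough room in both queues)
 *		spin_unlock_bh(&req_mgr_h->hw_lock);
 *		if (rc != -EAGAIN)
 *			return rc;		(SW queue full - give up)
 *		msleep(1);			(HW queue full - retry shortly)
 *	} while (1);
 */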
/*!
 * Enqueue caller request to crypto hardware.
 *
 * \param drvdata
 * \param ssi_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param is_dout If "true": completion is handled by the caller
 *	  If "false": this function adds a dummy descriptor completion
 *	  and waits upon completion signal.
 *
 * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
 */
int send_request(
	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
	HwDesc_s *desc, unsigned int len, bool is_dout)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /*initial sequence length*/
	HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
	int rc;
	unsigned int max_required_seq_len = (total_seq_len +
				((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
				 SSI_IVPOOL_SEQ_LEN) +
				((is_dout == 0) ? 1 : 0));
	DECL_CYCLE_COUNT_RESOURCES;

#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
	if (rc != 0) {
		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
		spin_unlock_bh(&req_mgr_h->hw_lock);
		return rc;
	}
#endif

	do {
		spin_lock_bh(&req_mgr_h->hw_lock);

		/* Check if there is enough place in the SW/HW queues:
		 * if IV generation is needed, add the maximum IV sequence size,
		 * and if there is no DOUT descriptor, add 1 for the internal
		 * completion descriptor
		 */
		rc = request_mgr_queues_status_check(req_mgr_h,
						     cc_base,
						     max_required_seq_len);
		if (likely(rc == 0))
			/* There is enough place in the queue */
			break;
		/* Something went wrong - release the spinlock */
		spin_unlock_bh(&req_mgr_h->hw_lock);

		if (rc != -EAGAIN) {
			/* Any error other than HW queue full
			 * (SW queue is full)
			 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		/* HW queue is full - short sleep then retry */
		msleep(1);
	} while (1);

	/* An additional completion descriptor is needed in case the caller did
	 * not enable any DLLI/MLLI DOUT bit in the given sequence
	 */
	if (!is_dout) {
		init_completion(&ssi_req->seq_compl);
		ssi_req->user_cb = request_mgr_complete;
		ssi_req->user_arg = &(ssi_req->seq_compl);
		total_seq_len++; /* Account for the internal completion descriptor */
	}

	if (ssi_req->ivgen_dma_addr_len > 0) {
		SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
			      ssi_req->ivgen_dma_addr_len,
			      (unsigned long long)ssi_req->ivgen_dma_addr[0],
			      (unsigned long long)ssi_req->ivgen_dma_addr[1],
			      (unsigned long long)ssi_req->ivgen_dma_addr[2],
			      ssi_req->ivgen_size);

		/* Acquire IV from pool */
		rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
				     ssi_req->ivgen_dma_addr_len,
				     ssi_req->ivgen_size, iv_seq, &iv_seq_len);

		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
			spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
		req_mgr_h->max_used_sw_slots = used_sw_slots;
	}

	CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
			   &req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
	START_CYCLE_COUNT_AT(req_mgr_h->req_queue[req_mgr_h->req_queue_head].submit_cycle);
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);

#ifdef FLUSH_CACHE_ALL
	flush_cache_all();
#endif

	/* STAT_PHASE_4: Push sequence */
	START_CYCLE_COUNT();
	enqueue_seq(cc_base, iv_seq, iv_seq_len);
	enqueue_seq(cc_base, desc, len);
	enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
	END_CYCLE_COUNT(ssi_req->op_type, STAT_PHASE_4);

	CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc, ssi_req->is_monitored_p);

	if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
		/* This means that there was a problem with the resume */
		BUG();
	}
	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots -= total_seq_len;

	spin_unlock_bh(&req_mgr_h->hw_lock);

	if (!is_dout) {
		/* Wait upon sequence completion.
		 * Return "0" - Operation done successfully.
		 */
		return wait_for_completion_interruptible(&ssi_req->seq_compl);
	} else {
		/* Operation still in process */
		return -EINPROGRESS;
	}
}
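
/*
 * Illustrative sketch only (not compiled): hypothetical callers of
 * send_request(); "my_seq", "my_seq_len", "my_done" and "my_ctx" are
 * placeholders, not part of this driver.
 *
 * Synchronous use (no DOUT descriptor in the sequence):
 *	struct ssi_crypto_req req = {0};
 *	rc = send_request(drvdata, &req, my_seq, my_seq_len, false);
 *	(returns 0 once the internally appended completion descriptor fires)
 *
 * Asynchronous use (sequence contains a DOUT descriptor):
 *	req.user_cb = my_done;
 *	req.user_arg = my_ctx;
 *	rc = send_request(drvdata, &req, my_seq, my_seq_len, true);
 *	(returns -EINPROGRESS; my_done() is invoked from the completion
 *	 handler once the request is dequeued by proc_completions())
 */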
/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assume this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(
	struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /*initial sequence length*/
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
	rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
	if (unlikely(rc != 0)) {
		return rc;
	}
	HW_DESC_SET_QUEUE_LAST_IND(&desc[len - 1]);

	enqueue_seq(cc_base, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(
					CC_REG_OFFSET(CRY_KERNEL,
						      DSCRPTR_QUEUE_CONTENT));

	return 0;
}
void complete_request(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct ssi_drvdata *drvdata =
		container_of(work, struct ssi_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif
static void proc_completions(struct ssi_drvdata *drvdata)
{
	struct ssi_crypto_req *ssi_req;
	struct platform_device *plat_dev = drvdata->plat_dev;
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	int rc = 0;
#endif
	DECL_CYCLE_COUNT_RESOURCES;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
			SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
			BUG();
		}

		ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
		END_CYCLE_COUNT_AT(ssi_req->submit_cycle, ssi_req->op_type, STAT_PHASE_5); /* Seq. Comp. */
		END_CC_MONITOR_COUNT(drvdata->cc_base, ssi_req->op_type, STAT_PHASE_6,
				     drvdata->monitor_null_cycles, &request_mgr_handle->monitor_lock, ssi_req->is_monitored_p);

#ifdef FLUSH_CACHE_ALL
		flush_cache_all();
#endif

#ifdef COMPLETION_DELAY
		/* Delay */
		{
			uint32_t axi_err;
			int i;

			SSI_LOG_INFO("Delay\n");
			for (i = 0; i < 1000000; i++) {
				axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
			}
		}
#endif /* COMPLETION_DELAY */

		if (likely(ssi_req->user_cb != NULL)) {
			START_CYCLE_COUNT();
			ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
			END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_3);
		}
		request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
		SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
		rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
		if (rc != 0) {
			SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
		}
#endif
	}
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	uint32_t irq;
	DECL_CYCLE_COUNT_RESOURCES;

	START_CYCLE_COUNT();

	irq = (drvdata->irq & SSI_COMP_IRQ_MASK);

	if (irq & SSI_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it, we clear it now */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter once more */
		request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
			CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));

		/* ISR-to-Tasklet latency */
		if (request_mgr_handle->axi_completed) {
			/* Only if this actually reflects ISR-to-completion-handling latency, i.e. it is
			 * not a duplicate caused by an interrupt after the AXIM_MON_ERR clear, before end of loop
			 */
			END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
		}

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0.
				 * The following assignment was changed to = (previously +=) to conform to KW restrictions.
				 */
				request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
					CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
			} while (request_mgr_handle->axi_completed > 0);

			/* To avoid the interrupt from firing as we unmask it, we clear it now */
			CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

			/* Avoid race with above clear: Test completion counter once more */
			request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
				CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
		}
	}

	/* After verifying that there is nothing to do, unmask the AXI completion interrupt */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
			      CC_HAL_READ_REGISTER(
					CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
}
/*
 * Resume the queue configuration - no need to take the lock as this happens inside
 * the spin lock protection
 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}
/*
 * Suspend the queue configuration. Since it is used only for runtime suspend,
 * just verify that the queue can be suspended.
 */
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}
bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif
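
/*
 * Illustrative sketch only (not compiled): the runtime-PM suspend path is
 * expected to consume the helpers above roughly as follows; the callback
 * name "my_runtime_suspend" is a placeholder, not part of this driver:
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
 *		int rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
 *
 *		if (rc != 0)
 *			return rc;	(queue not empty - abort the suspend)
 *		(... power down the CC clocks and save state ...)
 *		return 0;
 *	}
 *
 * The matching resume path calls ssi_request_mgr_runtime_resume_queue()
 * after re-enabling the hardware.
 */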