/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "ssi_config.h"
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <crypto/ctr.h>
#ifdef FLUSH_CACHE_ALL
#include <asm/cacheflush.h>
#endif
#include <linux/pm_runtime.h>
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_ivgen.h"
#include "ssi_pm.h"
#include "ssi_fips.h"
#include "ssi_fips_local.h"

#define SSI_MAX_POLL_ITER 10

#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)

#ifdef CC_CYCLE_COUNT

#define MONITOR_CNTR_BIT 0

/**
 * Monitor descriptor.
 * Used to measure CC performance.
 */
#define INIT_CC_MONITOR_DESC(desc_p) \
do { \
	HW_DESC_INIT(desc_p); \
	HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
} while (0)

/**
 * Try adding monitor descriptor BEFORE enqueuing sequence.
 */
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
do { \
	if (!test_and_set_bit(MONITOR_CNTR_BIT, (lock_p))) { \
		enqueue_seq((cc_base_addr), (desc_p), 1); \
		*(is_monitored_p) = true; \
	} else { \
		*(is_monitored_p) = false; \
	} \
} while (0)

/**
 * If CC_CYCLE_DESC_HEAD was successfully added:
 * 1. Add memory barrier descriptor to ensure last AXI transaction.
 * 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
 */
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
do { \
	if ((is_monitored) == true) { \
		HwDesc_s barrier_desc; \
		HW_DESC_INIT(&barrier_desc); \
		HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
		HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
		enqueue_seq((cc_base_addr), &barrier_desc, 1); \
		enqueue_seq((cc_base_addr), (desc_p), 1); \
	} \
} while (0)

/**
 * Try reading the CC monitor counter value upon sequence completion.
 * Can only succeed if lock_p is held by the owner of the given request.
 */
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
do { \
	uint32_t elapsed_cycles; \
	if ((is_monitored) == true) { \
		elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
		clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
		if (elapsed_cycles > 0) \
			update_cc_stat(stat_op_type, stat_phase, (elapsed_cycles - monitor_null_cycles)); \
	} \
} while (0)
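
/*
 * Note: the cycle monitor is a single shared HW resource; the macros
 * above serialize access to it with one bit lock (MONITOR_CNTR_BIT).
 * Only the request that wins test_and_set_bit() gets monitored; all
 * concurrent requests simply skip the measurement.
 */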

#else /*CC_CYCLE_COUNT*/

#define INIT_CC_MONITOR_DESC(desc_p) do { } while (0)
#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) do { } while (0)
#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) do { } while (0)
#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) do { } while (0)
#endif /*CC_CYCLE_COUNT*/

struct ssi_request_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	uint32_t req_queue_head;
	uint32_t req_queue_tail;
	uint32_t axi_completed;
	uint32_t q_free_slots;
	spinlock_t hw_lock;
	HwDesc_s compl_desc;
	uint8_t *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;
	HwDesc_s monitor_desc;
	volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	bool is_runtime_suspended;
#endif
};
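
/*
 * req_queue is managed as a circular buffer: head and tail advance with
 * "(idx + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)", which assumes that
 * MAX_REQUEST_QUEUE_SIZE is a power of two (e.g., with a size of 8,
 * (7 + 1) & 7 == 0, so index 7 wraps back to 0).
 */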

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

void request_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;

	if (req_mgr_h == NULL)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma != 0) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
		dma_free_coherent(&drvdata->plat_dev->dev,
				  sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						 req_mgr_h->min_free_hw_slots));
	SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int request_mgr_init(struct ssi_drvdata *drvdata)
{
#ifdef CC_CYCLE_COUNT
	HwDesc_s monitor_desc[2];
	struct ssi_crypto_req monitor_req = {0};
#endif
	struct ssi_request_mgr_handle *req_mgr_h;
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
	if (req_mgr_h == NULL) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
	SSI_LOG_DEBUG("Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (unlikely(req_mgr_h->workq == NULL)) {
		SSI_LOG_ERR("Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	SSI_LOG_DEBUG("Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
			CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
	SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
			    req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
			sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu B) dummy completion buffer\n",
			    sizeof(uint32_t));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
				     sizeof(uint32_t));

	/* Init. "dummy" completion descriptor */
	HW_DESC_INIT(&req_mgr_h->compl_desc);
	HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
	HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
			      req_mgr_h->dummy_comp_buff_dma,
			      sizeof(uint32_t), NS_BIT, 1);
	HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
	HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
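
	/*
	 * The descriptor above performs a minimal BYPASS transfer of one
	 * zero word into dummy_comp_buff. It is appended to sequences
	 * that have no DOUT descriptor of their own (is_dout == false in
	 * send_request()) so that sequence completion is still signaled
	 * through the AXI completion monitor.
	 */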

#ifdef CC_CYCLE_COUNT
	/* For CC-HW cycle performance trace */
	INIT_CC_MONITOR_DESC(&req_mgr_h->monitor_desc);
	set_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
	monitor_desc[0] = req_mgr_h->monitor_desc;
	monitor_desc[1] = req_mgr_h->monitor_desc;

	rc = send_request(drvdata, &monitor_req, monitor_desc, 2, 0);
	if (unlikely(rc != 0))
		goto req_mgr_init_err;

	drvdata->monitor_null_cycles = READ_REGISTER(drvdata->cc_base +
			CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR));
	SSI_LOG_ERR("Calibration time=0x%08x\n", drvdata->monitor_null_cycles);

	clear_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
#endif

	return 0;

req_mgr_init_err:
	request_mgr_fini(drvdata);
	return rc;
}

static inline void enqueue_seq(
	void __iomem *cc_base,
	HwDesc_s seq[], unsigned int seq_len)
{
	int i;

	for (i = 0; i < seq_len; i++) {
		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
		wmb();
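		/*
		 * DSCRPTR_QUEUE_WORD0 acts as a FIFO push port: six
		 * successive writes enqueue one descriptor. wmb() makes
		 * sure words 0-4 are posted before word 5, whose write
		 * commits the descriptor to the HW queue.
		 */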
		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
		SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
			      seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
	}
}

/*!
 * Completion will take place if and only if the user requested completion
 * by setting "is_dout = 0" in send_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

static inline int request_mgr_queues_status_check(
	struct ssi_request_mgr_handle *req_mgr_h,
	void __iomem *cc_base,
	unsigned int total_seq_len)
{
	unsigned long poll_queue;

	/* The SW queue is checked only once, as it cannot change during
	 * the poll: the spin_lock_bh is held by this thread.
	 */
	if (unlikely(((req_mgr_h->req_queue_head + 1) &
		      (MAX_REQUEST_QUEUE_SIZE - 1)) ==
		     req_mgr_h->req_queue_tail)) {
		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			    req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -EBUSY;
	}
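
	/*
	 * "Full" above is the classic one-slot-open condition: if
	 * advancing head would make it equal tail, the queue is treated
	 * as full. One slot always stays unused, so a full queue can be
	 * told apart from an empty one (head == tail).
	 */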

	if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
		return 0;
	}
	/* Wait for space in HW queue. Poll a constant number of iterations. */
	for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			CC_HAL_READ_REGISTER(
				CC_REG_OFFSET(CRY_KERNEL,
					      DSCRPTR_QUEUE_CONTENT));
		if (unlikely(req_mgr_h->q_free_slots <
			     req_mgr_h->min_free_hw_slots)) {
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
		}

		if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
			/* If there is enough space, return */
			return 0;
		}

		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			      req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
		      "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		      req_mgr_h->req_queue_head,
		      MAX_REQUEST_QUEUE_SIZE,
		      req_mgr_h->q_free_slots,
		      total_seq_len);
	return -EAGAIN;
}

/*!
 * Enqueue caller request to crypto hardware.
 *
 * \param drvdata
 * \param ssi_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param is_dout If "true": completion is handled by the caller.
 *                If "false": this function adds a dummy descriptor completion
 *                and waits upon completion signal.
 *
 * \return int Returns -EINPROGRESS if "is_dout=true"; if "is_dout=false",
 *             returns "0" once the sequence completes (or a negative error
 *             code if the wait is interrupted).
 */
int send_request(
	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
	HwDesc_s *desc, unsigned int len, bool is_dout)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /*initial sequence length*/
	HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
	int rc;
	unsigned int max_required_seq_len = (total_seq_len +
			((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
			SSI_IVPOOL_SEQ_LEN) +
			((is_dout == 0) ? 1 : 0));
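	/*
	 * Worst-case queue demand: the caller's sequence, plus up to
	 * SSI_IVPOOL_SEQ_LEN descriptors if an IV must be pulled from
	 * the pool, plus one dummy completion descriptor when is_dout
	 * is false.
	 */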
	DECL_CYCLE_COUNT_RESOURCES;

#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
	if (rc != 0) {
		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}
#endif

	do {
		spin_lock_bh(&req_mgr_h->hw_lock);

		/* Check if there is enough space in the SW/HW queues:
		 * account for the max IV-generation size, and for no-dout
		 * requests add 1 for the internal completion descriptor.
		 */
		rc = request_mgr_queues_status_check(req_mgr_h,
						     cc_base,
						     max_required_seq_len);
		if (likely(rc == 0))
			/* There is enough space in the queue */
			break;
		/* Something went wrong - release the spinlock */
		spin_unlock_bh(&req_mgr_h->hw_lock);

		if (rc != -EAGAIN) {
			/* Any error other than HW queue full
			 * (SW queue is full)
			 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		/* HW queue is full - short sleep */
		msleep(1);
	} while (1);

	/* An additional completion descriptor is needed in case the caller
	 * did not enable any DLLI/MLLI DOUT bit in the given sequence
	 */
	if (!is_dout) {
		init_completion(&ssi_req->seq_compl);
		ssi_req->user_cb = request_mgr_complete;
		ssi_req->user_arg = &(ssi_req->seq_compl);
		total_seq_len++;
	}
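
	/*
	 * For this synchronous path, user_cb is request_mgr_complete(),
	 * which is invoked from proc_completions() and signals seq_compl,
	 * waking the wait_for_completion_interruptible() call below.
	 */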

	if (ssi_req->ivgen_dma_addr_len > 0) {
		SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
			      ssi_req->ivgen_dma_addr_len,
			      (unsigned long long)ssi_req->ivgen_dma_addr[0],
			      (unsigned long long)ssi_req->ivgen_dma_addr[1],
			      (unsigned long long)ssi_req->ivgen_dma_addr[2],
			      ssi_req->ivgen_size);

		/* Acquire IV from pool */
		rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, ssi_req->ivgen_dma_addr_len,
				     ssi_req->ivgen_size, iv_seq, &iv_seq_len);

		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
			spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
#endif
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
		req_mgr_h->max_used_sw_slots = used_sw_slots;
	}

	CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
			   &req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
	START_CYCLE_COUNT_AT(req_mgr_h->req_queue[req_mgr_h->req_queue_head].submit_cycle);
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);

#ifdef FLUSH_CACHE_ALL
	flush_cache_all();
#endif

	/* STAT_PHASE_4: Push sequence */
	START_CYCLE_COUNT();
	enqueue_seq(cc_base, iv_seq, iv_seq_len);
	enqueue_seq(cc_base, desc, len);
	enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
	END_CYCLE_COUNT(ssi_req->op_type, STAT_PHASE_4);

	CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc, ssi_req->is_monitored_p);

	if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
		/* This means that there was a problem with the resume */
		BUG();
	}
	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots -= total_seq_len;

	spin_unlock_bh(&req_mgr_h->hw_lock);

	if (!is_dout) {
		/* Wait upon sequence completion.
		 * Returns "0" if the operation completed successfully.
		 */
		return wait_for_completion_interruptible(&ssi_req->seq_compl);
	} else {
		/* Operation still in progress */
		return -EINPROGRESS;
	}
}
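
/*
 * Typical synchronous use of send_request() (a sketch; "drvdata",
 * "desc" and "len" stand for a prepared driver handle and a descriptor
 * sequence with no DOUT bit set):
 *
 *	struct ssi_crypto_req req = {0};
 *	int rc = send_request(drvdata, &req, desc, len, false);
 *	if (rc != 0)
 *		; // error, or wait interrupted by a signal
 *
 * With is_dout=true the caller handles completion itself: its own
 * user_cb (if set) is invoked from proc_completions(), and the return
 * value here is -EINPROGRESS.
 */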

/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(
	struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len)
{
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /*initial sequence length*/
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Polls up to SSI_MAX_POLL_ITER
	 * iterations (see request_mgr_queues_status_check()).
	 */
	rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
	if (unlikely(rc != 0)) {
		return rc;
	}
	HW_DESC_SET_QUEUE_LAST_IND(&desc[len - 1]);

	enqueue_seq(cc_base, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(
			CC_REG_OFFSET(CRY_KERNEL,
				      DSCRPTR_QUEUE_CONTENT));

	return 0;
}

void complete_request(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct ssi_drvdata *drvdata =
		container_of(work, struct ssi_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct ssi_drvdata *drvdata)
{
	struct ssi_crypto_req *ssi_req;
	struct platform_device *plat_dev = drvdata->plat_dev;
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
	int rc = 0;
#endif
	DECL_CYCLE_COUNT_RESOURCES;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
			SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
			BUG();
		}

		ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
		END_CYCLE_COUNT_AT(ssi_req->submit_cycle, ssi_req->op_type, STAT_PHASE_5); /* Seq. Comp. */
		END_CC_MONITOR_COUNT(drvdata->cc_base, ssi_req->op_type, STAT_PHASE_6,
				     drvdata->monitor_null_cycles, &request_mgr_handle->monitor_lock, ssi_req->is_monitored_p);

#ifdef FLUSH_CACHE_ALL
		flush_cache_all();
#endif

#ifdef COMPLETION_DELAY
		/* Delay */
		{
			uint32_t axi_err;
			int i;

			SSI_LOG_INFO("Delay\n");
			for (i = 0; i < 1000000; i++) {
				axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
			}
		}
#endif /* COMPLETION_DELAY */

		if (likely(ssi_req->user_cb != NULL)) {
			START_CYCLE_COUNT();
			ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
			END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_3);
		}
		request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
		SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
		rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
		if (rc != 0) {
			SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
		}
#endif
	}
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
	void __iomem *cc_base = drvdata->cc_base;
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	uint32_t irq;

	DECL_CYCLE_COUNT_RESOURCES;

	START_CYCLE_COUNT();

	irq = (drvdata->irq & SSI_COMP_IRQ_MASK);

	if (irq & SSI_COMP_IRQ_MASK) {
		/* To avoid the interrupt firing again as we unmask it, clear it now */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter once more */
		request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
				CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));

		/* ISR-to-Tasklet latency */
		if (request_mgr_handle->axi_completed) {
			/* Only if this actually reflects ISR-to-completion-handling
			 * latency, i.e. is not a duplicate caused by an interrupt
			 * arriving after the AXIM_MON_ERR clear, before the end of
			 * the loop
			 */
			END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
		}

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is always 0.
				 * The assignment was changed from += to = to
				 * conform to KW restrictions.
				 */
				request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
						CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
			} while (request_mgr_handle->axi_completed > 0);

			/* To avoid the interrupt firing again as we unmask it, clear it now */
			CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);

			/* Avoid race with above clear: Test completion counter once more */
			request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
					CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
		}
	}
	/* After verifying that there is nothing to do, unmask the AXI completion interrupt */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
			      CC_HAL_READ_REGISTER(
				      CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
}

/*
 * Resume the queue configuration. Callers need no extra locking:
 * the update happens under hw_lock protection here.
 */
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * Suspend the queue configuration. Since this is used for runtime suspend
 * only, just verify that the queue can be suspended.
 */
int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
{
	struct ssi_request_mgr_handle *request_mgr_handle =
		drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif