/*
 * Source listing extracted from a git-blame view of commit 14fa93cd
 * (author SJ); the blame/line-table formatting has been stripped.
 */
1 | #include <linux/gfp.h> |
2 | #include <linux/workqueue.h> | |
3 | #include <crypto/internal/skcipher.h> | |
4 | ||
5 | #include "nitrox_dev.h" | |
6 | #include "nitrox_req.h" | |
7 | #include "nitrox_csr.h" | |
8 | #include "nitrox_req.h" | |
9 | ||
10 | /* SLC_STORE_INFO */ | |
11 | #define MIN_UDD_LEN 16 | |
12 | /* PKT_IN_HDR + SLC_STORE_INFO */ | |
13 | #define FDATA_SIZE 32 | |
14 | /* Base destination port for the solicited requests */ | |
15 | #define SOLICIT_BASE_DPORT 256 | |
16 | #define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL | |
17 | ||
18 | #define REQ_NOT_POSTED 1 | |
19 | #define REQ_BACKLOG 2 | |
20 | #define REQ_POSTED 3 | |
21 | ||
22 | /** | |
23 | * Response codes from SE microcode | |
24 | * 0x00 - Success | |
25 | * Completion with no error | |
26 | * 0x43 - ERR_GC_DATA_LEN_INVALID | |
27 | * Invalid Data length if Encryption Data length is | |
28 | * less than 16 bytes for AES-XTS and AES-CTS. | |
29 | * 0x45 - ERR_GC_CTX_LEN_INVALID | |
30 | * Invalid context length: CTXL != 23 words. | |
31 | * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID | |
32 | * DOCSIS support is enabled with other than | |
33 | * AES/DES-CBC mode encryption. | |
34 | * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID | |
35 | * Authentication offset is other than 0 with | |
36 | * Encryption IV source = 0. | |
37 | * Authentication offset is other than 8 (DES)/16 (AES) | |
38 | * with Encryption IV source = 1 | |
39 | * 0x51 - ERR_GC_CRC32_INVALID_SELECTION | |
40 | * CRC32 is enabled for other than DOCSIS encryption. | |
41 | * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID | |
42 | * Invalid flag options in AES-CCM IV. | |
43 | */ | |
44 | ||
/**
 * softreq_unmap_sgbufs - unmap the DMA buffers and free the sg lists
 * of a soft request.
 * @sr: soft request to clean up
 *
 * Undoes everything dma_map_inbufs()/dma_map_outbufs() set up.  Safe to
 * call with either side (or both) not yet mapped: each side is skipped
 * when its sglist pointer is NULL.
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv (always the first gather entry) */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist; in.map_bufs_cnt = 1 (IV) + src entries.
	 * NOTE(review): per the DMA API, dma_unmap_sg() should receive the
	 * nents originally passed to dma_map_sg(), which may differ from
	 * the mapped count recorded here -- confirm.
	 */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;

out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist; only mapped separately when out of place.
	 * out.map_bufs_cnt = 3 (ORH + IV + completion) + dst entries.
	 */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}
98 | ||
/* Tear down a soft request: release all DMA mappings and sg lists,
 * then free the request itself.  After this @sr must not be touched.
 */
static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}
104 | ||
105 | /** | |
106 | * create_sg_component - create SG componets for N5 device. | |
107 | * @sr: Request structure | |
108 | * @sgtbl: SG table | |
109 | * @nr_comp: total number of components required | |
110 | * | |
111 | * Component structure | |
112 | * | |
113 | * 63 48 47 32 31 16 15 0 | |
114 | * -------------------------------------- | |
115 | * | LEN0 | LEN1 | LEN2 | LEN3 | | |
116 | * |------------------------------------- | |
117 | * | PTR0 | | |
118 | * -------------------------------------- | |
119 | * | PTR1 | | |
120 | * -------------------------------------- | |
121 | * | PTR2 | | |
122 | * -------------------------------------- | |
123 | * | PTR3 | | |
124 | * -------------------------------------- | |
125 | * | |
126 | * Returns 0 if success or a negative errno code on error. | |
127 | */ | |
128 | static int create_sg_component(struct nitrox_softreq *sr, | |
129 | struct nitrox_sgtable *sgtbl, int map_nents) | |
130 | { | |
131 | struct nitrox_device *ndev = sr->ndev; | |
132 | struct nitrox_sgcomp *sgcomp; | |
133 | struct nitrox_sglist *sglist; | |
134 | dma_addr_t dma; | |
135 | size_t sz_comp; | |
136 | int i, j, nr_sgcomp; | |
137 | ||
138 | nr_sgcomp = roundup(map_nents, 4) / 4; | |
139 | ||
140 | /* each component holds 4 dma pointers */ | |
141 | sz_comp = nr_sgcomp * sizeof(*sgcomp); | |
142 | sgcomp = kzalloc(sz_comp, sr->gfp); | |
143 | if (!sgcomp) | |
144 | return -ENOMEM; | |
145 | ||
146 | sgtbl->sgcomp = sgcomp; | |
147 | sgtbl->nr_sgcomp = nr_sgcomp; | |
148 | ||
149 | sglist = sgtbl->sglist; | |
150 | /* populate device sg component */ | |
151 | for (i = 0; i < nr_sgcomp; i++) { | |
152 | for (j = 0; j < 4; j++) { | |
153 | sgcomp->len[j] = cpu_to_be16(sglist->len); | |
154 | sgcomp->dma[j] = cpu_to_be64(sglist->dma); | |
155 | sglist++; | |
156 | } | |
157 | sgcomp++; | |
158 | } | |
159 | /* map the device sg component */ | |
160 | dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); | |
161 | if (dma_mapping_error(DEV(ndev), dma)) { | |
162 | kfree(sgtbl->sgcomp); | |
163 | sgtbl->sgcomp = NULL; | |
164 | return -ENOMEM; | |
165 | } | |
166 | ||
167 | sgtbl->dma = dma; | |
168 | sgtbl->len = sz_comp; | |
169 | ||
170 | return 0; | |
171 | } | |
172 | ||
173 | /** | |
174 | * dma_map_inbufs - DMA map input sglist and creates sglist component | |
175 | * for N5 device. | |
176 | * @sr: Request structure | |
177 | * @req: Crypto request structre | |
178 | * | |
179 | * Returns 0 if successful or a negative errno code on error. | |
180 | */ | |
181 | static int dma_map_inbufs(struct nitrox_softreq *sr, | |
182 | struct se_crypto_request *req) | |
183 | { | |
184 | struct device *dev = DEV(sr->ndev); | |
185 | struct scatterlist *sg = req->src; | |
186 | struct nitrox_sglist *glist; | |
187 | int i, nents, ret = 0; | |
188 | dma_addr_t dma; | |
189 | size_t sz; | |
190 | ||
191 | nents = sg_nents(req->src); | |
192 | ||
193 | /* creater gather list IV and src entries */ | |
194 | sz = roundup((1 + nents), 4) * sizeof(*glist); | |
195 | glist = kzalloc(sz, sr->gfp); | |
196 | if (!glist) | |
197 | return -ENOMEM; | |
198 | ||
199 | sr->in.sglist = glist; | |
200 | /* map IV */ | |
201 | dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); | |
202 | ret = dma_mapping_error(dev, dma); | |
203 | if (ret) | |
204 | goto iv_map_err; | |
205 | ||
206 | sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | |
207 | /* map src entries */ | |
208 | nents = dma_map_sg(dev, req->src, nents, sr->in.dir); | |
209 | if (!nents) { | |
210 | ret = -EINVAL; | |
211 | goto src_map_err; | |
212 | } | |
213 | sr->in.buf = req->src; | |
214 | ||
215 | /* store the mappings */ | |
216 | glist->len = req->ivsize; | |
217 | glist->dma = dma; | |
218 | glist++; | |
219 | sr->in.total_bytes += req->ivsize; | |
220 | ||
221 | for_each_sg(req->src, sg, nents, i) { | |
222 | glist->len = sg_dma_len(sg); | |
223 | glist->dma = sg_dma_address(sg); | |
224 | sr->in.total_bytes += glist->len; | |
225 | glist++; | |
226 | } | |
227 | /* roundup map count to align with entires in sg component */ | |
228 | sr->in.map_bufs_cnt = (1 + nents); | |
229 | ||
230 | /* create NITROX gather component */ | |
231 | ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); | |
232 | if (ret) | |
233 | goto incomp_err; | |
234 | ||
235 | return 0; | |
236 | ||
237 | incomp_err: | |
238 | dma_unmap_sg(dev, req->src, nents, sr->in.dir); | |
239 | sr->in.map_bufs_cnt = 0; | |
240 | src_map_err: | |
241 | dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); | |
242 | iv_map_err: | |
243 | kfree(sr->in.sglist); | |
244 | sr->in.sglist = NULL; | |
245 | return ret; | |
246 | } | |
247 | ||
248 | static int dma_map_outbufs(struct nitrox_softreq *sr, | |
249 | struct se_crypto_request *req) | |
250 | { | |
251 | struct device *dev = DEV(sr->ndev); | |
252 | struct nitrox_sglist *glist = sr->in.sglist; | |
253 | struct nitrox_sglist *slist; | |
254 | struct scatterlist *sg; | |
255 | int i, nents, map_bufs_cnt, ret = 0; | |
256 | size_t sz; | |
257 | ||
258 | nents = sg_nents(req->dst); | |
259 | ||
260 | /* create scatter list ORH, IV, dst entries and Completion header */ | |
261 | sz = roundup((3 + nents), 4) * sizeof(*slist); | |
262 | slist = kzalloc(sz, sr->gfp); | |
263 | if (!slist) | |
264 | return -ENOMEM; | |
265 | ||
266 | sr->out.sglist = slist; | |
267 | sr->out.dir = DMA_BIDIRECTIONAL; | |
268 | /* map ORH */ | |
269 | sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, | |
270 | sr->out.dir); | |
271 | ret = dma_mapping_error(dev, sr->resp.orh_dma); | |
272 | if (ret) | |
273 | goto orh_map_err; | |
274 | ||
275 | /* map completion */ | |
276 | sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, | |
277 | COMP_HLEN, sr->out.dir); | |
278 | ret = dma_mapping_error(dev, sr->resp.completion_dma); | |
279 | if (ret) | |
280 | goto compl_map_err; | |
281 | ||
282 | sr->inplace = (req->src == req->dst) ? true : false; | |
283 | /* out place */ | |
284 | if (!sr->inplace) { | |
285 | nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); | |
286 | if (!nents) { | |
287 | ret = -EINVAL; | |
288 | goto dst_map_err; | |
289 | } | |
290 | } | |
291 | sr->out.buf = req->dst; | |
292 | ||
293 | /* store the mappings */ | |
294 | /* orh */ | |
295 | slist->len = ORH_HLEN; | |
296 | slist->dma = sr->resp.orh_dma; | |
297 | slist++; | |
298 | ||
299 | /* copy the glist mappings */ | |
300 | if (sr->inplace) { | |
301 | nents = sr->in.map_bufs_cnt - 1; | |
302 | map_bufs_cnt = sr->in.map_bufs_cnt; | |
303 | while (map_bufs_cnt--) { | |
304 | slist->len = glist->len; | |
305 | slist->dma = glist->dma; | |
306 | slist++; | |
307 | glist++; | |
308 | } | |
309 | } else { | |
310 | /* copy iv mapping */ | |
311 | slist->len = glist->len; | |
312 | slist->dma = glist->dma; | |
313 | slist++; | |
314 | /* copy remaining maps */ | |
315 | for_each_sg(req->dst, sg, nents, i) { | |
316 | slist->len = sg_dma_len(sg); | |
317 | slist->dma = sg_dma_address(sg); | |
318 | slist++; | |
319 | } | |
320 | } | |
321 | ||
322 | /* completion */ | |
323 | slist->len = COMP_HLEN; | |
324 | slist->dma = sr->resp.completion_dma; | |
325 | ||
326 | sr->out.map_bufs_cnt = (3 + nents); | |
327 | ||
328 | ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); | |
329 | if (ret) | |
330 | goto outcomp_map_err; | |
331 | ||
332 | return 0; | |
333 | ||
334 | outcomp_map_err: | |
335 | if (!sr->inplace) | |
336 | dma_unmap_sg(dev, req->dst, nents, sr->out.dir); | |
337 | sr->out.map_bufs_cnt = 0; | |
338 | sr->out.buf = NULL; | |
339 | dst_map_err: | |
340 | dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); | |
341 | sr->resp.completion_dma = 0; | |
342 | compl_map_err: | |
343 | dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); | |
344 | sr->resp.orh_dma = 0; | |
345 | orh_map_err: | |
346 | kfree(sr->out.sglist); | |
347 | sr->out.sglist = NULL; | |
348 | return ret; | |
349 | } | |
350 | ||
/* Map both I/O sides of a soft request for the device.
 * Returns 0 on success or a negative errno; on failure nothing is left
 * mapped.
 */
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int err;

	/* map the input side first; nothing to undo if it fails */
	err = dma_map_inbufs(sr, creq);
	if (err)
		return err;

	/* map the output side; on failure roll back both sides */
	err = dma_map_outbufs(sr, creq);
	if (err)
		softreq_unmap_sgbufs(sr);

	return err;
}
366 | ||
/* Queue @sr on the command queue's backlog and mark it REQ_BACKLOG.
 * Used when the command ring is full; the backlog is drained later by
 * post_backlog_cmds().
 */
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}
378 | ||
/* Append @sr to the queue's response list; posting order is preserved
 * so the response handler can reap completions oldest-first.
 */
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}
388 | ||
/* Remove @sr from the queue's response list under the response lock. */
static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}
396 | ||
/* Return the oldest request on the response list, or NULL if empty.
 * NOTE(review): called without response_lock from the response handler;
 * presumably safe because only that path removes entries -- confirm.
 */
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}
403 | ||
/* Try to reserve one slot in the command queue.
 * Optimistically bumps pending_count; if that exceeds @qlen the
 * reservation is rolled back and true (queue full) is returned.
 * On false, the caller owns one pending slot and must release it
 * (see process_response_list()) when the request completes.
 */
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}
414 | ||
/**
 * post_se_instr - post one SE instruction to the Packet Input ring and
 * ring the doorbell.
 * @sr: request to post (sr->instr must be fully built)
 * @cmdq: command queue to post to
 *
 * Caller must already hold a ring slot (see cmdq_full()).  The request
 * is added to the response list before the doorbell is rung.
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
	u64 offset;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	/* get the next write offset: read the address offset field of
	 * the doorbell CSR for this queue
	 */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
	/* copy the instruction */
	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
	memcpy(ent, &sr->instr, cmdq->instr_size);
	/* flush the command queue updates */
	dma_wmb();

	/* timestamp and mark posted before ringing, so the response
	 * path can recognize and reap this request
	 */
	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	spin_unlock_bh(&cmdq->cmdq_lock);
}
452 | ||
/* Drain backlogged requests into the ring while space is available.
 * Each posted request's skcipher callback is woken with -EINPROGRESS.
 * Returns 0 when the backlog is fully drained, -EBUSY if the ring
 * filled up first.
 */
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -EBUSY;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* stash skreq: sr may complete once posted */
		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}
486 | ||
/* Submit @sr to its command queue, backlogging if the ring is full and
 * the caller allows it.
 *
 * Returns -EINPROGRESS when posted, -EBUSY when the request (or an
 * earlier backlogged one) had to stay on the backlog, or -EAGAIN when
 * the ring is full and CRYPTO_TFM_REQ_MAY_BACKLOG is not set (caller
 * must free @sr in that case).
 */
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
	int ret = -EBUSY;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EAGAIN;

		/* ring full but backlogging allowed: ret stays -EBUSY */
		backlog_list_add(sr, cmdq);
	} else {
		/* drain older backlogged requests first to keep ordering */
		ret = post_backlog_cmds(cmdq);
		if (ret) {
			backlog_list_add(sr, cmdq);
			return ret;
		}
		post_se_instr(sr, cmdq);
		ret = -EINPROGRESS;
	}
	return ret;
}
509 | ||
/**
 * nitrox_process_se_request - build and submit a request to an SE core
 * @ndev: NITROX device
 * @req: crypto request
 * @callback: completion callback, invoked from the response handler
 * @skreq: skcipher request handed back to @callback
 *
 * Returns -EINPROGRESS when the request is posted, -EBUSY when it is
 * backlogged, or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	/* ORH and completion words hold PENDING_SIG until the device
	 * overwrites them; the response path detects completion by the
	 * two words diverging
	 */
	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle: the DMA address lives in the header
	 * placed immediately before the context buffer
	 */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue
	 * NOTE(review): smp_processor_id() in preemptible context
	 * triggers a debug warning; confirm callers disable preemption
	 * or switch to raw_smp_processor_id()
	 */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0: gather component list DMA address */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1: gather/scatter sizes and total input length */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3: context DMA pointer */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4: scatter list size */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5: scatter component list DMA address */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No conversion for front data,
	 * It goes into payload
	 * put GP Header in front data
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	/* flush the soft_req changes before posting the cmd */
	wmb();

	ret = nitrox_enqueue_request(sr);
	if (ret == -EAGAIN)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}
637 | ||
638 | static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout) | |
639 | { | |
640 | return time_after_eq(jiffies, (tstamp + timeout)); | |
641 | } | |
642 | ||
643 | void backlog_qflush_work(struct work_struct *work) | |
644 | { | |
645 | struct nitrox_cmdq *cmdq; | |
646 | ||
647 | cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush); | |
648 | post_backlog_cmds(cmdq); | |
649 | } | |
650 | ||
/**
 * process_response_list - reap completed requests from a command queue
 * @cmdq: command queue to process
 *
 * Walks the response list in posting order.  For each request the
 * device has completed (or that has timed out), releases its pending
 * slot, destroys it and invokes its callback with the ORH error code.
 * Stops at the first still-pending, not-timed-out request.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates: both words
		 * start as PENDING_SIG and stay equal until the device
		 * writes the real values
		 */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		/* stash callback and skreq before sr is destroyed */
		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code is carried in the low byte */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}
705 | ||
/**
 * pkt_slc_resp_handler - bottom-half post processing of SE responses
 * @data: per-queue struct bh_data pointer, cast through unsigned long
 *
 * Reaps completed requests, re-arms the completion-count interrupt and
 * schedules a backlog flush if requests are waiting for ring space.
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	/* defer backlog draining to process context via the workqueue */
	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}