]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
14fa93cd SJ |
2 | #include <linux/gfp.h> |
3 | #include <linux/workqueue.h> | |
4 | #include <crypto/internal/skcipher.h> | |
5 | ||
cd078cb6 | 6 | #include "nitrox_common.h" |
14fa93cd SJ |
7 | #include "nitrox_dev.h" |
8 | #include "nitrox_req.h" | |
9 | #include "nitrox_csr.h" | |
14fa93cd SJ |
10 | |
11 | /* SLC_STORE_INFO */ | |
12 | #define MIN_UDD_LEN 16 | |
13 | /* PKT_IN_HDR + SLC_STORE_INFO */ | |
14 | #define FDATA_SIZE 32 | |
15 | /* Base destination port for the solicited requests */ | |
16 | #define SOLICIT_BASE_DPORT 256 | |
14fa93cd SJ |
17 | |
18 | #define REQ_NOT_POSTED 1 | |
19 | #define REQ_BACKLOG 2 | |
20 | #define REQ_POSTED 3 | |
21 | ||
c4d7d318 | 22 | /* |
14fa93cd SJ |
23 | * Response codes from SE microcode |
24 | * 0x00 - Success | |
25 | * Completion with no error | |
26 | * 0x43 - ERR_GC_DATA_LEN_INVALID | |
27 | * Invalid Data length if Encryption Data length is | |
28 | * less than 16 bytes for AES-XTS and AES-CTS. | |
29 | * 0x45 - ERR_GC_CTX_LEN_INVALID | |
30 | * Invalid context length: CTXL != 23 words. | |
31 | * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID | |
32 | * DOCSIS support is enabled with other than | |
33 | * AES/DES-CBC mode encryption. | |
34 | * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID | |
35 | * Authentication offset is other than 0 with | |
36 | * Encryption IV source = 0. | |
37 | * Authentication offset is other than 8 (DES)/16 (AES) | |
38 | * with Encryption IV source = 1 | |
39 | * 0x51 - ERR_GC_CRC32_INVALID_SELECTION | |
40 | * CRC32 is enabled for other than DOCSIS encryption. | |
41 | * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID | |
42 | * Invalid flag options in AES-CCM IV. | |
43 | */ | |
44 | ||
/*
 * incr_index - advance a ring index by @count, wrapping at @max.
 *
 * Assumes @count never exceeds @max, so folding the sum back into
 * [0, max) needs at most one subtraction.
 */
static inline int incr_index(int index, int count, int max)
{
	int next = index + count;

	return (next >= max) ? (next - max) : next;
}
54 | ||
/* Undo all DMA mappings of a software request: unmap the source and
 * destination scatterlists and the device-format SG component lists,
 * and free the component buffers.
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	/* release the source sglist and its device SG component list */
	dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	/* release the destination sglist and its device SG component list */
	dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}
77 | ||
/* Release all DMA mappings of @sr and free the request itself */
static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}
83 | ||
84 | /** | |
85 | * create_sg_component - create SG componets for N5 device. | |
86 | * @sr: Request structure | |
87 | * @sgtbl: SG table | |
4bede34c | 88 | * @map_nents: number of dma mapped entries |
14fa93cd SJ |
89 | * |
90 | * Component structure | |
91 | * | |
92 | * 63 48 47 32 31 16 15 0 | |
93 | * -------------------------------------- | |
94 | * | LEN0 | LEN1 | LEN2 | LEN3 | | |
95 | * |------------------------------------- | |
96 | * | PTR0 | | |
97 | * -------------------------------------- | |
98 | * | PTR1 | | |
99 | * -------------------------------------- | |
100 | * | PTR2 | | |
101 | * -------------------------------------- | |
102 | * | PTR3 | | |
103 | * -------------------------------------- | |
104 | * | |
105 | * Returns 0 if success or a negative errno code on error. | |
106 | */ | |
107 | static int create_sg_component(struct nitrox_softreq *sr, | |
108 | struct nitrox_sgtable *sgtbl, int map_nents) | |
109 | { | |
110 | struct nitrox_device *ndev = sr->ndev; | |
111 | struct nitrox_sgcomp *sgcomp; | |
4bede34c | 112 | struct scatterlist *sg; |
14fa93cd SJ |
113 | dma_addr_t dma; |
114 | size_t sz_comp; | |
115 | int i, j, nr_sgcomp; | |
116 | ||
117 | nr_sgcomp = roundup(map_nents, 4) / 4; | |
118 | ||
119 | /* each component holds 4 dma pointers */ | |
120 | sz_comp = nr_sgcomp * sizeof(*sgcomp); | |
121 | sgcomp = kzalloc(sz_comp, sr->gfp); | |
122 | if (!sgcomp) | |
123 | return -ENOMEM; | |
124 | ||
125 | sgtbl->sgcomp = sgcomp; | |
14fa93cd | 126 | |
4bede34c | 127 | sg = sgtbl->sg; |
14fa93cd SJ |
128 | /* populate device sg component */ |
129 | for (i = 0; i < nr_sgcomp; i++) { | |
4bede34c NR |
130 | for (j = 0; j < 4 && sg; j++) { |
131 | sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); | |
132 | sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); | |
133 | sg = sg_next(sg); | |
14fa93cd | 134 | } |
14fa93cd SJ |
135 | } |
136 | /* map the device sg component */ | |
137 | dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); | |
138 | if (dma_mapping_error(DEV(ndev), dma)) { | |
139 | kfree(sgtbl->sgcomp); | |
140 | sgtbl->sgcomp = NULL; | |
141 | return -ENOMEM; | |
142 | } | |
143 | ||
4bede34c NR |
144 | sgtbl->sgcomp_dma = dma; |
145 | sgtbl->sgcomp_len = sz_comp; | |
14fa93cd SJ |
146 | |
147 | return 0; | |
148 | } | |
149 | ||
150 | /** | |
151 | * dma_map_inbufs - DMA map input sglist and creates sglist component | |
152 | * for N5 device. | |
153 | * @sr: Request structure | |
154 | * @req: Crypto request structre | |
155 | * | |
156 | * Returns 0 if successful or a negative errno code on error. | |
157 | */ | |
158 | static int dma_map_inbufs(struct nitrox_softreq *sr, | |
159 | struct se_crypto_request *req) | |
160 | { | |
161 | struct device *dev = DEV(sr->ndev); | |
3c995c4c | 162 | struct scatterlist *sg; |
14fa93cd | 163 | int i, nents, ret = 0; |
14fa93cd | 164 | |
4bede34c NR |
165 | nents = dma_map_sg(dev, req->src, sg_nents(req->src), |
166 | DMA_BIDIRECTIONAL); | |
167 | if (!nents) | |
168 | return -EINVAL; | |
14fa93cd | 169 | |
4bede34c NR |
170 | for_each_sg(req->src, sg, nents, i) |
171 | sr->in.total_bytes += sg_dma_len(sg); | |
14fa93cd | 172 | |
4bede34c NR |
173 | sr->in.sg = req->src; |
174 | sr->in.sgmap_cnt = nents; | |
175 | ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); | |
14fa93cd SJ |
176 | if (ret) |
177 | goto incomp_err; | |
178 | ||
179 | return 0; | |
180 | ||
181 | incomp_err: | |
c114ecd3 | 182 | dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); |
4bede34c | 183 | sr->in.sgmap_cnt = 0; |
14fa93cd SJ |
184 | return ret; |
185 | } | |
186 | ||
/* DMA map the destination sglist and build its device-format SG
 * component list.
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	int nents, ret = 0;

	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	sr->out.sg = req->dst;
	sr->out.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
	sr->out.sgmap_cnt = 0;
	sr->out.sg = NULL;
	return ret;
}
212 | ||
/* Map both the source and destination buffers of @creq for DMA.
 * On failure nothing remains mapped.
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		/* roll back the input mappings as well */
		softreq_unmap_sgbufs(sr);

	return ret;
}
228 | ||
/* Park @sr on the command queue backlog; it is posted to the ring
 * later by post_backlog_cmds() once space frees up.
 */
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}
240 | ||
/* Append @sr to the queue's response list; entries are completed in
 * posting order by process_response_list().
 */
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}
250 | ||
/* Remove a completed (or timed-out) request from the response list */
static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}
258 | ||
/* Peek the oldest entry on the response list, or NULL if it is empty.
 * NOTE(review): reads the list without taking resp_qlock — presumably
 * safe because this path is the only consumer that removes entries;
 * confirm against the callers.
 */
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}
265 | ||
/* Try to reserve one ring slot by bumping pending_count.
 * Returns true (and releases the reservation) if the queue already
 * holds @qlen entries, false if the slot was reserved.
 */
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}
278 | ||
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 *
 * Copies the instruction into the next ring slot, moves the request to
 * the response list, and rings the doorbell.  The caller must already
 * have reserved ring space via cmdq_full().
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	/* timestamp used by process_response_list() for timeout checks */
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmd_qlock);

	/* increment the posted command count */
	atomic64_inc(&ndev->stats.posted);
}
317 | ||
/* Drain backlogged requests into the command ring while space remains.
 *
 * Returns 0 if the backlog was drained (or was already empty),
 * -ENOSPC if the ring filled up before all entries could be posted.
 */
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	/* fast path: nothing backlogged, avoid taking the lock */
	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* post the command */
		post_se_instr(sr, cmdq);
	}
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}
348 | ||
/* Submit @sr to its command queue, backlogging it if the ring is full
 * and the request allows it.
 *
 * Returns -EINPROGRESS on successful submission (posted or backlogged),
 * -ENOSPC if the ring is full and backlogging is not permitted.
 */
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* increment drop count */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}
371 | ||
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Completion callback arguments
 *
 * Builds the 64-byte packet instruction for @req and posts it to a
 * per-CPU-selected packet input ring.
 *
 * Returns -EINPROGRESS on successful submission (the callback fires on
 * completion), or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      void *cb_arg)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->cb_arg = cb_arg;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	/* ORH and completion words polled later by sr_completed() */
	sr->resp.orh = req->orh;
	sr->resp.completion = req->comp;

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		/* the ctx_hdr sits immediately before the context buffer */
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_inq[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/*
	 * No conversion for front data,
	 * It goes into payload
	 * put GP Header in front data
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;

	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}
499 | ||
/* Returns nonzero if @timeout jiffies have elapsed since @tstamp */
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}
504 | ||
/* Workqueue handler: drain a command queue's backlog into its ring */
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}
512 | ||
/* Check whether the SE core has finished @sr.
 * A non-pending ORH carrying an error code means the request is done;
 * otherwise poll the completion word for up to ~1ms before giving up.
 */
static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	if ((orh != PENDING_SIG) && (orh & 0xff))
		return true;

	/* ORH updated; busy-wait briefly for the completion word */
	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, timeout)) {
			pr_err("comp not done\n");
			return false;
		}
	}

	return true;
}
530 | ||
/*
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 *
 * Walks the response list in posting order, completing each request
 * whose ORH/completion words have been written by the SE core (or that
 * has timed out), and invokes its completion callback.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	int req_completed = 0, err = 0, budget;
	completion_t callback;
	void *cb_arg;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		/* requests complete in order; stop at the first unposted one */
		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (!sr_completed(sr)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);
		/* ORH error code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		/* save the callback before the request is freed */
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);
		if (callback)
			callback(cb_arg, err);

		req_completed++;
	}
}
582 | ||
/*
 * pkt_slc_resp_tasklet - post processing of SE responses
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read completion count */
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

	/* freed ring slots may allow backlogged requests to be posted */
	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}