// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3
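
/*
 * Request lifecycle (as driven by the code below): a soft request is
 * created in REQ_NOT_POSTED state, parked in REQ_BACKLOG when the
 * command queue is full and CRYPTO_TFM_REQ_MAY_BACKLOG is set, and
 * moves to REQ_POSTED once its instruction is written to the ring.
 */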

/**
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error.
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */

/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: soft request to clean up
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
        struct nitrox_device *ndev = sr->ndev;
        struct device *dev = DEV(ndev);
        struct nitrox_sglist *sglist;

        /* unmap in sgbuf */
        sglist = sr->in.sglist;
        if (!sglist)
                goto out_unmap;

        /* unmap iv */
        dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
        /* unmap src sglist */
        dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
        /* unmap gather component */
        dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
        kfree(sr->in.sglist);
        kfree(sr->in.sgcomp);

        sr->in.sglist = NULL;
        sr->in.buf = NULL;
        sr->in.map_bufs_cnt = 0;

out_unmap:
        /* unmap out sgbuf */
        sglist = sr->out.sglist;
        if (!sglist)
                return;

        /* unmap orh */
        dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

        if (!sr->inplace) {
                /* unmap dst sglist */
                dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
                             sr->out.dir);
        }
        /* unmap completion */
        dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
        /* unmap scatter component */
        dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
        kfree(sr->out.sglist);
        kfree(sr->out.sgcomp);

        sr->out.sglist = NULL;
        sr->out.buf = NULL;
        sr->out.map_bufs_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
        softreq_unmap_sgbufs(sr);
        kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped sg entries to pack into components
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 * Returns 0 on success, or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
                               struct nitrox_sgtable *sgtbl, int map_nents)
{
        struct nitrox_device *ndev = sr->ndev;
        struct nitrox_sgcomp *sgcomp;
        struct nitrox_sglist *sglist;
        dma_addr_t dma;
        size_t sz_comp;
        int i, j, nr_sgcomp;

        nr_sgcomp = roundup(map_nents, 4) / 4;
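
        /*
         * Worked example: 9 mapped entries round up to 12 and need
         * three components; the trailing slots are filled from the
         * zero-padded sglist the callers allocate, so unused len/dma
         * pairs end up as zeros.
         */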

        /* each component holds 4 dma pointers */
        sz_comp = nr_sgcomp * sizeof(*sgcomp);
        sgcomp = kzalloc(sz_comp, sr->gfp);
        if (!sgcomp)
                return -ENOMEM;

        sgtbl->sgcomp = sgcomp;
        sgtbl->nr_sgcomp = nr_sgcomp;

        sglist = sgtbl->sglist;
        /* populate device sg component */
        for (i = 0; i < nr_sgcomp; i++) {
                for (j = 0; j < 4; j++) {
                        sgcomp->len[j] = cpu_to_be16(sglist->len);
                        sgcomp->dma[j] = cpu_to_be64(sglist->dma);
                        sglist++;
                }
                sgcomp++;
        }
        /* map the device sg component */
        dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
        if (dma_mapping_error(DEV(ndev), dma)) {
                kfree(sgtbl->sgcomp);
                sgtbl->sgcomp = NULL;
                return -ENOMEM;
        }

        sgtbl->dma = dma;
        sgtbl->len = sz_comp;

        return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create its sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 on success, or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
                          struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct scatterlist *sg = req->src;
        struct nitrox_sglist *glist;
        int i, nents, ret = 0;
        dma_addr_t dma;
        size_t sz;

        nents = sg_nents(req->src);

        /* create gather list for the IV and src entries */
        sz = roundup((1 + nents), 4) * sizeof(*glist);
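        /*
         * The allocation is rounded up to a multiple of 4 entries and
         * zeroed, because create_sg_component() always consumes the
         * sglist in groups of 4; the padding entries become harmless
         * zero len/dma pairs in the last component.
         */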
        glist = kzalloc(sz, sr->gfp);
        if (!glist)
                return -ENOMEM;

        sr->in.sglist = glist;
        /* map IV */
        dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma)) {
                ret = -EINVAL;
                goto iv_map_err;
        }

        sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
        /* map src entries */
        nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
        if (!nents) {
                ret = -EINVAL;
                goto src_map_err;
        }
        sr->in.buf = req->src;

        /* store the mappings */
        glist->len = req->ivsize;
        glist->dma = dma;
        glist++;
        sr->in.total_bytes += req->ivsize;

        for_each_sg(req->src, sg, nents, i) {
                glist->len = sg_dma_len(sg);
                glist->dma = sg_dma_address(sg);
                sr->in.total_bytes += glist->len;
                glist++;
        }
        /* roundup map count to align with entries in sg component */
        sr->in.map_bufs_cnt = (1 + nents);

        /* create NITROX gather component */
        ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
        if (ret)
                goto incomp_err;

        return 0;

incomp_err:
        dma_unmap_sg(dev, req->src, nents, sr->in.dir);
        sr->in.map_bufs_cnt = 0;
src_map_err:
        dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
        kfree(sr->in.sglist);
        sr->in.sglist = NULL;
        return ret;
}

static int dma_map_outbufs(struct nitrox_softreq *sr,
                           struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct nitrox_sglist *glist = sr->in.sglist;
        struct nitrox_sglist *slist;
        struct scatterlist *sg;
        int i, nents, map_bufs_cnt, ret = 0;
        size_t sz;

        nents = sg_nents(req->dst);

        /* create scatter list: ORH, IV, dst entries and completion header */
        sz = roundup((3 + nents), 4) * sizeof(*slist);
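        /*
         * Scatter list layout, mirrored into the scatter component:
         * [ORH][IV][dst entries...][completion header], hence the
         * 3 + nents entry count (rounded up to a multiple of 4 for
         * create_sg_component(), as with the gather list above).
         */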
        slist = kzalloc(sz, sr->gfp);
        if (!slist)
                return -ENOMEM;

        sr->out.sglist = slist;
        sr->out.dir = DMA_BIDIRECTIONAL;
        /* map ORH */
        sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
                                          sr->out.dir);
        if (dma_mapping_error(dev, sr->resp.orh_dma)) {
                ret = -EINVAL;
                goto orh_map_err;
        }

        /* map completion */
        sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
                                                 COMP_HLEN, sr->out.dir);
        if (dma_mapping_error(dev, sr->resp.completion_dma)) {
                ret = -EINVAL;
                goto compl_map_err;
        }

        sr->inplace = (req->src == req->dst) ? true : false;
        /* out of place, map the dst entries */
        if (!sr->inplace) {
                nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
                if (!nents) {
                        ret = -EINVAL;
                        goto dst_map_err;
                }
        }
        sr->out.buf = req->dst;

        /* store the mappings */
        /* orh */
        slist->len = ORH_HLEN;
        slist->dma = sr->resp.orh_dma;
        slist++;

        /* copy the glist mappings */
        if (sr->inplace) {
                nents = sr->in.map_bufs_cnt - 1;
                map_bufs_cnt = sr->in.map_bufs_cnt;
                while (map_bufs_cnt--) {
                        slist->len = glist->len;
                        slist->dma = glist->dma;
                        slist++;
                        glist++;
                }
        } else {
                /* copy iv mapping */
                slist->len = glist->len;
                slist->dma = glist->dma;
                slist++;
                /* copy remaining maps */
                for_each_sg(req->dst, sg, nents, i) {
                        slist->len = sg_dma_len(sg);
                        slist->dma = sg_dma_address(sg);
                        slist++;
                }
        }

        /* completion */
        slist->len = COMP_HLEN;
        slist->dma = sr->resp.completion_dma;

        sr->out.map_bufs_cnt = (3 + nents);

        /* create NITROX scatter component */
        ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
        if (ret)
                goto outcomp_map_err;

        return 0;

outcomp_map_err:
        if (!sr->inplace)
                dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
        sr->out.map_bufs_cnt = 0;
        sr->out.buf = NULL;
dst_map_err:
        dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
        sr->resp.completion_dma = 0;
compl_map_err:
        dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
        sr->resp.orh_dma = 0;
orh_map_err:
        kfree(sr->out.sglist);
        sr->out.sglist = NULL;
        return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
                                    struct se_crypto_request *creq)
{
        int ret;

        ret = dma_map_inbufs(sr, creq);
        if (ret)
                return ret;

        ret = dma_map_outbufs(sr, creq);
        if (ret)
                softreq_unmap_sgbufs(sr);

        return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
                                    struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->backlog);

        spin_lock_bh(&cmdq->backlog_lock);
        list_add_tail(&sr->backlog, &cmdq->backlog_head);
        atomic_inc(&cmdq->backlog_count);
        atomic_set(&sr->status, REQ_BACKLOG);
        spin_unlock_bh(&cmdq->backlog_lock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->response);

        spin_lock_bh(&cmdq->response_lock);
        list_add_tail(&sr->response, &cmdq->response_head);
        spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        spin_lock_bh(&cmdq->response_lock);
        list_del(&sr->response);
        spin_unlock_bh(&cmdq->response_lock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
        return list_first_entry_or_null(&cmdq->response_head,
                                        struct nitrox_softreq, response);
}

static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
        if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                atomic_dec(&cmdq->pending_count);
                /* sync with other cpus */
                smp_mb__after_atomic();
                return true;
        }
        return false;
}
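
/*
 * Note the reserve-then-check pattern above: pending_count is bumped
 * optimistically and only rolled back (with a barrier so other CPUs
 * see the decrement) when the queue is already at qlen. A successful
 * call therefore reserves a ring slot for the caller.
 */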

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue for the instruction
 */
static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = sr->ndev;
        union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
        u64 offset;
        u8 *ent;

        spin_lock_bh(&cmdq->cmdq_lock);

        /* get the next write offset */
        offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
        pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
        /* copy the instruction */
        ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
        memcpy(ent, &sr->instr, cmdq->instr_size);
        /* flush the command queue updates */
        dma_wmb();

        sr->tstamp = jiffies;
        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);

        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);
        /* orders the doorbell rings */
        mmiowb();

        spin_unlock_bh(&cmdq->cmdq_lock);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;

        spin_lock_bh(&cmdq->backlog_lock);

        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                struct skcipher_request *skreq;

                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                        ret = -EBUSY;
                        break;
                }
                /* delete from backlog list */
                list_del(&sr->backlog);
                atomic_dec(&cmdq->backlog_count);
                /* sync with other cpus */
                smp_mb__after_atomic();

                skreq = sr->skreq;
                /* post the command */
                post_se_instr(sr, cmdq);

                /* backlog requests are posted, wakeup with -EINPROGRESS */
                skcipher_request_complete(skreq, -EINPROGRESS);
        }
        spin_unlock_bh(&cmdq->backlog_lock);

        return ret;
}

static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;
        int ret;

        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                /* drop the request if backlogging is not allowed */
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EAGAIN;

                backlog_list_add(sr, cmdq);
                return -EBUSY;
        }
        /* keep ordering: flush any backlogged commands first */
        ret = post_backlog_cmds(cmdq);
        if (ret) {
                backlog_list_add(sr, cmdq);
                return -EBUSY;
        }
        post_se_instr(sr, cmdq);

        return -EINPROGRESS;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: skcipher request passed back to @callback
 *
 * Returns -EINPROGRESS (or -EBUSY when backlogged) on success,
 * or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
                              struct se_crypto_request *req,
                              completion_t callback,
                              struct skcipher_request *skreq)
{
        struct nitrox_softreq *sr;
        dma_addr_t ctx_handle = 0;
        int qno, ret = 0;

        if (!nitrox_ready(ndev))
                return -ENODEV;

        sr = kzalloc(sizeof(*sr), req->gfp);
        if (!sr)
                return -ENOMEM;

        sr->ndev = ndev;
        sr->flags = req->flags;
        sr->gfp = req->gfp;
        sr->callback = callback;
        sr->skreq = skreq;

        atomic_set(&sr->status, REQ_NOT_POSTED);

        WRITE_ONCE(sr->resp.orh, PENDING_SIG);
        WRITE_ONCE(sr->resp.completion, PENDING_SIG);
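
        /*
         * Both the ORH and completion words start out as PENDING_SIG.
         * The SE overwrites them on completion, so the response path
         * treats "orh == completion" (i.e. both still the signature)
         * as a request that has not finished yet.
         */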

        ret = softreq_map_iobuf(sr, req);
        if (ret) {
                kfree(sr);
                return ret;
        }

        /* get the context handle */
        if (req->ctx_handle) {
                struct ctx_hdr *hdr;
                u8 *ctx_ptr;

                ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
                hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
                ctx_handle = hdr->ctx_dma;
        }

        /* select the queue */
        qno = smp_processor_id() % ndev->nr_queues;

        sr->cmdq = &ndev->pkt_cmdqs[qno];

        /*
         * 64-Byte Instruction Format
         *
         *  ----------------------
         *  |      DPTR0         | 8 bytes
         *  ----------------------
         *  |  PKT_IN_INSTR_HDR  | 8 bytes
         *  ----------------------
         *  |    PKT_IN_HDR      | 16 bytes
         *  ----------------------
         *  |     SLC_INFO       | 16 bytes
         *  ----------------------
         *  |   Front data       | 16 bytes
         *  ----------------------
         */
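
        /*
         * The five fields above sum to 8 + 8 + 16 + 16 + 16 = 64 bytes,
         * matching the per-entry size copied out by post_se_instr().
         */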

        /* fill the packet instruction */
        /* word 0 */
        sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

        /* word 1 */
        sr->instr.ih.value = 0;
        sr->instr.ih.s.g = 1;
        sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
        sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
        sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
        sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
        sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
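
        /*
         * Each 64-bit instruction word is assembled through the union
         * bitfields in host byte order, then converted once with
         * cpu_to_be64(), since the SE expects big-endian words.
         */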

        /* word 2 */
        sr->instr.irh.value[0] = 0;
        sr->instr.irh.s.uddl = MIN_UDD_LEN;
        /* context length in 64-bit words */
        sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
        /* offset from solicit base port 256 */
        sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
        sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
        sr->instr.irh.s.arg = req->ctrl.s.arg;
        sr->instr.irh.s.opcode = req->opcode;
        sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

        /* word 3 */
        sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

        /* word 4 */
        sr->instr.slc.value[0] = 0;
        sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
        sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

        /* word 5 */
        sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

        /*
         * No conversion needed for the front data;
         * it goes into the payload as-is.
         * Put the GP header in the front data.
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;
        /* flush the soft_req changes before posting the cmd */
        wmb();

        ret = nitrox_enqueue_request(sr);
        if (ret == -EAGAIN)
                goto send_fail;

        return ret;

send_fail:
        softreq_destroy(sr);
        return ret;
}
EXPORT_SYMBOL_GPL(nitrox_process_se_request);
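
/*
 * Example call site (a sketch, not the exact glue code): the skcipher
 * entry points build a struct se_crypto_request and hand it off with a
 * completion callback, roughly:
 *
 *      creq->opcode = op;
 *      creq->flags = skreq->base.flags;
 *      creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 *                  GFP_KERNEL : GFP_ATOMIC;
 *      ...
 *      return nitrox_process_se_request(nctx->ndev, creq,
 *                                       done_callback, skreq);
 *
 * where "op", "nctx" and "done_callback" are the caller's own names;
 * the -EINPROGRESS/-EBUSY return is forwarded to the crypto API.
 */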

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
        return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
        struct nitrox_cmdq *cmdq;

        cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
        post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests.
 * @cmdq: Command queue to walk for completed responses
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr;
        struct skcipher_request *skreq;
        completion_t callback;
        int req_completed = 0, err = 0, budget;

        /* check all pending requests */
        budget = atomic_read(&cmdq->pending_count);

        while (req_completed < budget) {
                sr = get_first_response_entry(cmdq);
                if (!sr)
                        break;

                if (atomic_read(&sr->status) != REQ_POSTED)
                        break;

                /* check orh and completion bytes updates */
                if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
                        /* request not completed, check for timeout */
                        if (!cmd_timeout(sr->tstamp, ndev->timeout))
                                break;
                        dev_err_ratelimited(DEV(ndev),
                                            "Request timeout, orh 0x%016llx\n",
                                            READ_ONCE(sr->resp.orh));
                }
                atomic_dec(&cmdq->pending_count);
                /* sync with other cpus */
                smp_mb__after_atomic();
                /* remove from response list */
                response_list_del(sr, cmdq);

                callback = sr->callback;
                skreq = sr->skreq;
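
                /*
                 * The low byte of the ORH carries the SE response code
                 * (see the table at the top of this file); it is handed
                 * to the completion callback unchanged.
                 */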
                err = READ_ONCE(sr->resp.orh) & 0xff;
                softreq_destroy(sr);

                if (callback)
                        callback(skreq, err);

                req_completed++;
        }
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * @data: tasklet argument carrying the per-ring bh_data
 */
void pkt_slc_resp_handler(unsigned long data)
{
        struct bh_data *bh = (void *)(uintptr_t)(data);
        struct nitrox_cmdq *cmdq = bh->cmdq;
        union nps_pkt_slc_cnts pkt_slc_cnts;

        /* read completion count */
        pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
        /* resend the interrupt if more work to do */
        pkt_slc_cnts.s.resend = 1;

        process_response_list(cmdq);

        /*
         * clear the interrupt with resend bit enabled,
         * MSI-X interrupt is generated if Completion count > Threshold
         */
        writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
        /* order the writes */
        mmiowb();

        if (atomic_read(&cmdq->backlog_count))
                schedule_work(&cmdq->backlog_qflush);
}