drivers/crypto/cavium/nitrox/nitrox_lib.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE 256

/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16

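/*
 * nitrox_cmdq_init - initialize a command queue
 * @cmdq: command queue to set up
 * @align_bytes: required alignment of the queue base address
 *
 * The ring is over-allocated by @align_bytes so the base address can be
 * aligned by hand; the unaligned address and DMA handle are kept so the
 * buffer can be released later with dma_free_coherent().
 */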
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
						 &cmdq->unalign_dma,
						 GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}

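/* reset the write index and the pending/backlog counters */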
static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
	cmdq->write_idx = 0;
	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
}

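/*
 * nitrox_cmdq_cleanup - release a command queue
 *
 * Cancels any outstanding backlog flush work, frees the DMA ring and
 * clears the queue bookkeeping. Safe to call on a queue that was never
 * allocated.
 */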
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	if (!cmdq->unalign_base)
		return;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	cmdq->dbell_csr_addr = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;
	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

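/* tear down every packet input command queue and free the queue array */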
static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

		nitrox_cmdq_cleanup(cmdq);
	}
	kfree(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
}

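/*
 * nitrox_alloc_pktin_queues - allocate the packet input command queues
 *
 * One command queue is created per packet input ring; each queue caches
 * the CSR addresses of its doorbell and solicit-port completion count
 * registers. On any failure all previously initialized queues are freed.
 */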
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet input ring doorbell address */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* packet solicit port completion count address */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pktq_fail;
	}
	return 0;

pktq_fail:
	nitrox_free_pktin_queues(ndev);
	return err;
}

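/* per-device DMA pool for crypto contexts; each element holds a ctx_hdr followed by the context data */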
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("nitrox-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

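/* release the crypto context pool, if one was created */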
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 *
 * Return: pointer to the allocated context header, or NULL on failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	struct crypto_ctx_hdr *chdr;
	void *vaddr;
	dma_addr_t dma;

	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
	if (!chdr)
		return NULL;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		kfree(chdr);
		return NULL;
	}

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	chdr->pool = ndev->ctx_pool;
	chdr->dma = dma;
	chdr->vaddr = vaddr;

	return chdr;
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct crypto_ctx_hdr *ctxp;

	if (!ctx)
		return;

	ctxp = ctx;
	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
	kfree(ctxp);
}

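/*
 * Typical usage sketch for the two helpers above (illustrative only,
 * not taken from this file):
 *
 *	struct crypto_ctx_hdr *chdr;
 *
 *	chdr = crypto_alloc_context(ndev);
 *	if (!chdr)
 *		return -ENOMEM;
 *	// ... program the context area that follows the embedded ctx_hdr ...
 *	crypto_free_context(chdr);
 */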
/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the packet input command queues.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err = 0;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}