/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_gcm_pmd_private.h"

static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
	{ /* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.aad_size = {
					.min = 0,
					.max = 65535,
					.increment = 1
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
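
/*
 * The table above advertises AES-GMAC (auth) and AES-GCM (AEAD) with
 * 128/192/256-bit keys (16..32 bytes in steps of 8), digests of 1..16 bytes
 * and a fixed 12-byte (96-bit) IV; GCM additionally accepts 0..65535 bytes
 * of AAD. An application would normally probe this table through the
 * cryptodev API rather than reading it directly; a hedged sketch:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap != NULL && rte_cryptodev_sym_capability_check_aead(cap,
 *			32, 16, 0, 12) == 0)
 *		use_aes_256_gcm();	// hypothetical application helper
 *
 * dev_id and use_aes_256_gcm() are application-side assumptions, not part
 * of this driver.
 */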

/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
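
/*
 * The four callbacks above are deliberate no-ops: this is a software PMD
 * with no hardware state to program on configure/start/stop/close. They are
 * still provided so the cryptodev framework has a valid callback to invoke.
 */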

/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}
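
/*
 * Totals are accumulated across every queue pair; rte_cryptodev_stats_get()
 * zeroes *stats before invoking this callback, and every slot up to
 * nb_queue_pairs is assumed to hold an allocated queue pair.
 */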

/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_gcm_pmd_capabilities;

		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);

		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on its dev_id and qp_id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_gcm_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"aesni_gcm_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
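
/*
 * The generated name doubles as the rte_ring lookup key used below, which
 * is what lets a ring left over from a previous configuration of the same
 * queue pair be found and reused.
 */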

/** Create a ring to hold processed packets */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}
		AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
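
/*
 * RING_F_SP_ENQ | RING_F_SC_DEQ selects the single-producer/single-consumer
 * ring variant: cryptodev queue pairs are not thread-safe and are expected
 * to be driven by a single lcore each, so the cheaper ring mode suffices.
 */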

/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = (const struct aesni_gcm_ops *)internals->ops;

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = qp_conf->mp_session;
	qp->sess_mp_priv = qp_conf->mp_session_private;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp) {
		rte_free(qp);
		/* Don't leave a dangling pointer to the freed qp behind */
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return -1;
}
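
/*
 * Application-side view (a hedged sketch, not part of this driver): the
 * qp_conf fields consumed above originate from a call such as
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_mp,
 *		.mp_session_private = session_priv_mp,
 *	};
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *
 * where session_mp and session_priv_mp are mempools assumed to have been
 * created by the application beforehand.
 */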

/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Returns the size of the aesni gcm session structure */
static unsigned int
aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_gcm_session);
}

/** Configure an aesni gcm session from a crypto xform chain */
static int
aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		AESNI_GCM_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		AESNI_GCM_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}
	ret = aesni_gcm_set_session_parameters(internals->ops,
				sess_private_data, xform);
	if (ret != 0) {
		AESNI_GCM_LOG(ERR, "failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
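
/*
 * This callback is reached through rte_cryptodev_sym_session_init(). An
 * application-side sketch under the two-mempool session model used above
 * (session_mp, session_priv_mp and aead_xform are assumptions):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(session_mp);
 *	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
 *			&aead_xform, session_priv_mp) != 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *
 * The private mempool's element size must be at least
 * aesni_gcm_pmd_sym_session_get_size() bytes.
 */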

/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
		.dev_configure		= aesni_gcm_pmd_config,
		.dev_start		= aesni_gcm_pmd_start,
		.dev_stop		= aesni_gcm_pmd_stop,
		.dev_close		= aesni_gcm_pmd_close,

		.stats_get		= aesni_gcm_pmd_stats_get,
		.stats_reset		= aesni_gcm_pmd_stats_reset,

		.dev_infos_get		= aesni_gcm_pmd_info_get,

		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
		.queue_pair_release	= aesni_gcm_pmd_qp_release,
		.queue_pair_count	= aesni_gcm_pmd_qp_count,

		.sym_session_get_size	= aesni_gcm_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_gcm_pmd_sym_session_configure,
		.sym_session_clear	= aesni_gcm_pmd_sym_session_clear
};

struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
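
/*
 * The exported pointer is what the rest of the PMD wires into the device;
 * in this driver the create path (aesni_gcm_pmd.c) is expected to assign it
 * to dev->dev_ops.
 */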