/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_gcm_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
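/*
 * Usage sketch (illustrative, not part of this driver): applications
 * normally reach this table through rte_cryptodev_info_get() rather than
 * directly; `dev_id` below is an assumed, already-probed device id.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++)
 *		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
 *		    cap->sym.cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM)
 *			break;
 *
 * stopping at the AES-GCM cipher entry advertised above.
 */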
/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
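/*
 * Usage sketch (illustrative assumption): the no-op lifecycle callbacks
 * above are driven by the generic device API, roughly:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);	  -> aesni_gcm_pmd_config()
 *	rte_cryptodev_start(dev_id);		  -> aesni_gcm_pmd_start()
 */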
/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}
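/*
 * Usage sketch (illustrative): these per-queue-pair counters surface
 * through the generic stats API, e.g.:
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	rte_cryptodev_stats_get(dev_id, &stats);
 *	rte_cryptodev_stats_reset(dev_id);
 *
 * Note the callback above only accumulates into *stats; the generic layer
 * is expected to zero the struct before dispatching here.
 */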
/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}
/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_gcm_pmd_capabilities;

		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}
/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_gcm_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_gcm_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n > sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->prod.size >= ring_size) {
			GCM_LOG_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = &gcm_ops[internals->vector_mode];

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	rte_free(qp);

	return -1;
}
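/*
 * Usage sketch (illustrative): a queue pair is created through the public
 * API, which dispatches to aesni_gcm_pmd_qp_setup() above; the descriptor
 * count and queue id are example values.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "queue pair setup failed\n");
 */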
/** Start queue pair */
static int
aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Returns the size of the aesni gcm session structure */
static unsigned
aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_gcm_session);
}
/** Configure an aesni gcm session from a crypto xform chain */
static void *
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		GCM_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
			sess, xform) != 0) {
		GCM_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}
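/*
 * Usage sketch (illustrative; the exact session API has varied between
 * DPDK releases): a session reaches session_configure above through the
 * generic create call. Key material is omitted here.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
 *		},
 *	};
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(dev_id, &xform);
 */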
/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	if (sess)
		memset(sess, 0, sizeof(struct aesni_gcm_session));
}
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
		.dev_configure		= aesni_gcm_pmd_config,
		.dev_start		= aesni_gcm_pmd_start,
		.dev_stop		= aesni_gcm_pmd_stop,
		.dev_close		= aesni_gcm_pmd_close,

		.stats_get		= aesni_gcm_pmd_stats_get,
		.stats_reset		= aesni_gcm_pmd_stats_reset,

		.dev_infos_get		= aesni_gcm_pmd_info_get,

		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
		.queue_pair_release	= aesni_gcm_pmd_qp_release,
		.queue_pair_start	= aesni_gcm_pmd_qp_start,
		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
		.queue_pair_count	= aesni_gcm_pmd_qp_count,

		.session_get_size	= aesni_gcm_pmd_session_get_size,
		.session_configure	= aesni_gcm_pmd_session_configure,
		.session_clear		= aesni_gcm_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
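/*
 * Wiring sketch (assumption: done in the PMD's create routine, in a
 * companion file not shown here):
 *
 *	dev->dev_ops = rte_aesni_gcm_pmd_ops;
 *
 * after which every rte_cryptodev_*() call on this device dispatches
 * through the callbacks defined above.
 */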