/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_snow3g_pmd_private.h"

#define SNOW3G_IV_LENGTH 16	/* f8/f9 take a 128-bit IV */
#define SNOW3G_DIGEST_LENGTH 4	/* UIA2 produces a 32-bit MAC */
#define SNOW3G_MAX_BURST 8	/* ops batched per multi-buffer call */
#define BYTE_LEN 8

/** Determine the operation mode (chain order) from the xform chain. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return SNOW3G_OP_NOT_SUPPORTED;

	/* Chains of more than two xforms are not supported. */
	if (xform->next != NULL && xform->next->next != NULL)
		return SNOW3G_OP_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return SNOW3G_OP_ONLY_AUTH;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return SNOW3G_OP_AUTH_CIPHER;
		else
			return SNOW3G_OP_NOT_SUPPORTED;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return SNOW3G_OP_ONLY_CIPHER;
		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return SNOW3G_OP_CIPHER_AUTH;
		else
			return SNOW3G_OP_NOT_SUPPORTED;
	}

	return SNOW3G_OP_NOT_SUPPORTED;
}

/** Parse the crypto xform chain and set private session parameters. */
int
snow3g_set_session_parameters(struct snow3g_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	enum snow3g_operation mode;

	/* Select Crypto operation - hash then cipher / cipher then hash */
	mode = snow3g_get_mode(xform);

	switch (mode) {
	case SNOW3G_OP_CIPHER_AUTH:
		auth_xform = xform->next;

		/* Fall-through */
	case SNOW3G_OP_ONLY_CIPHER:
		cipher_xform = xform;
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		cipher_xform = xform->next;
		/* Fall-through */
	case SNOW3G_OP_ONLY_AUTH:
		auth_xform = xform;
		break;
	case SNOW3G_OP_NOT_SUPPORTED:
	default:
		SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
		return -EINVAL;
	}

	if (cipher_xform) {
		/* Only SNOW 3G UEA2 is supported */
		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
			return -EINVAL;
		/* Initialize key */
		sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
				&sess->pKeySched_cipher);
	}

	if (auth_xform) {
		/* Only SNOW 3G UIA2 is supported */
		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
			return -EINVAL;
		sess->auth_op = auth_xform->auth.op;
		/* Initialize key */
		sso_snow3g_init_key_sched(auth_xform->auth.key.data,
				&sess->pKeySched_hash);
	}

	sess->op = mode;

	return 0;
}

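/*
 * With-session ops reuse the snow3g_session kept in the generic
 * session's _private area; sessionless ops borrow a session object
 * from the queue pair's mempool and initialize it from the op's xform
 * chain (process_ops() returns it to the pool once the op completes).
 */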
/** Get a SNOW 3G session for the given crypto op. */
static struct snow3g_session *
snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
{
	struct snow3g_session *sess;

	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
		if (unlikely(op->sym->session->dev_type !=
				RTE_CRYPTODEV_SNOW3G_PMD))
			return NULL;

		sess = (struct snow3g_session *)op->sym->session->_private;
	} else {
		struct rte_cryptodev_session *c_sess = NULL;

		if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
			return NULL;

		sess = (struct snow3g_session *)c_sess->_private;

		if (unlikely(snow3g_set_session_parameters(sess,
				op->sym->xform) != 0)) {
			/* Return the unused session object to the pool. */
			rte_mempool_put(qp->sess_mp, (void *)c_sess);
			return NULL;
		}
	}

	return sess;
}

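/*
 * For both cipher paths below, cipher.data.offset and .length are
 * expressed in bits (3GPP convention); the byte-aligned path converts
 * them with ">> 3" before calling the multi-buffer f8 kernel.
 */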
/** Encrypt/decrypt mbufs with the same cipher key. */
static uint8_t
process_snow3g_cipher_op(struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
	uint8_t *IV[SNOW3G_MAX_BURST];
	uint32_t num_bytes[SNOW3G_MAX_BURST];

	for (i = 0; i < num_ops; i++) {
		/* Sanity checks. */
		if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			SNOW3G_LOG_ERR("iv");
			break;
		}

		/* Offset and length are in bits; convert to bytes. */
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		dst[i] = ops[i]->sym->m_dst ?
			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3) :
			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		IV[i] = ops[i]->sym->cipher.iv.data;
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;

		processed_ops++;
	}

	sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, IV, src, dst,
			num_bytes, processed_ops);

	return processed_ops;
}

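/*
 * Single-op, bit-granular variant: the enqueue path routes any op
 * whose cipher length or offset is not a multiple of 8 bits here.
 * In-place operation (m_dst == NULL) is rejected at bit granularity.
 */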
/** Encrypt/decrypt an mbuf (bit-level function). */
static uint8_t
process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
		struct snow3g_session *session)
{
	uint8_t *src, *dst;
	uint8_t *IV;
	uint32_t length_in_bits, offset_in_bits;

	/* Sanity checks. */
	if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		SNOW3G_LOG_ERR("iv");
		return 0;
	}

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		SNOW3G_LOG_ERR("bit-level in-place not supported\n");
		return 0;
	}
	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
	IV = op->sym->cipher.iv.data;
	length_in_bits = op->sym->cipher.data.length;

	sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}

/** Generate/verify a hash from mbufs with the same hash key. */
static int
process_snow3g_hash_op(struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	unsigned i;
	uint8_t processed_ops = 0;
	uint8_t *src, *dst;
	uint32_t length_in_bits;

	for (i = 0; i < num_ops; i++) {
		if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			SNOW3G_LOG_ERR("aad");
			break;
		}

		if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			SNOW3G_LOG_ERR("digest");
			break;
		}

		/* Data must be byte aligned */
		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			SNOW3G_LOG_ERR("Offset");
			break;
		}

		length_in_bits = ops[i]->sym->auth.data.length;

		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->auth.data.offset >> 3);

		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
			/* Append scratch space for the computed digest. */
			dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
					ops[i]->sym->auth.digest.length);
			if (unlikely(dst == NULL)) {
				ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
				SNOW3G_LOG_ERR("tailroom");
				break;
			}

			sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
					ops[i]->sym->auth.aad.data, src,
					length_in_bits, dst);
			/* Verify digest. */
			if (memcmp(dst, ops[i]->sym->auth.digest.data,
					ops[i]->sym->auth.digest.length) != 0)
				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

			/* Trim the area used for the digest from the mbuf. */
			rte_pktmbuf_trim(ops[i]->sym->m_src,
					ops[i]->sym->auth.digest.length);
		} else {
			dst = ops[i]->sym->auth.digest.data;

			sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
					ops[i]->sym->auth.aad.data, src,
					length_in_bits, dst);
		}
		processed_ops++;
	}

	return processed_ops;
}

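/*
 * Run a same-session batch through the cipher/hash kernels in the
 * order dictated by the session mode, then push the finished ops onto
 * the queue pair's processed_ops ring, from which dequeue_burst reads.
 */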
/** Process a batch of crypto ops that share the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct snow3g_qp *qp, uint8_t num_ops,
		uint16_t *accumulated_enqueued_ops)
{
	unsigned i;
	unsigned enqueued_ops, processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.\n", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

	switch (session->op) {
	case SNOW3G_OP_ONLY_CIPHER:
		processed_ops = process_snow3g_cipher_op(ops,
				session, num_ops);
		break;
	case SNOW3G_OP_ONLY_AUTH:
		processed_ops = process_snow3g_hash_op(ops, session,
				num_ops);
		break;
	case SNOW3G_OP_CIPHER_AUTH:
		processed_ops = process_snow3g_cipher_op(ops, session,
				num_ops);
		process_snow3g_hash_op(ops, session, processed_ops);
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		processed_ops = process_snow3g_hash_op(ops, session,
				num_ops);
		process_snow3g_cipher_op(ops, session, processed_ops);
		break;
	default:
		/* Operation not supported. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * If there was no error/authentication failure,
		 * change status to successful.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		/* Free session if a session-less crypto op. */
		if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}

	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)ops, processed_ops, NULL);
	qp->qp_stats.enqueued_count += enqueued_ops;
	*accumulated_enqueued_ops += enqueued_ops;

	return enqueued_ops;
}

/** Process a crypto op with length/offset in bits. */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	unsigned enqueued_op, processed_op;

	switch (session->op) {
	case SNOW3G_OP_ONLY_CIPHER:
		processed_op = process_snow3g_cipher_op_bit(op,
				session);
		break;
	case SNOW3G_OP_ONLY_AUTH:
		processed_op = process_snow3g_hash_op(&op, session, 1);
		break;
	case SNOW3G_OP_CIPHER_AUTH:
		processed_op = process_snow3g_cipher_op_bit(op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(&op, session, 1);
		break;
	case SNOW3G_OP_AUTH_CIPHER:
		processed_op = process_snow3g_hash_op(&op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(op, session);
		break;
	default:
		/* Operation not supported. */
		processed_op = 0;
	}

	/*
	 * If there was no error/authentication failure,
	 * change status to successful.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Free session if a session-less crypto op. */
	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
		rte_mempool_put(qp->sess_mp, op->sym->session);
		op->sym->session = NULL;
	}

	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
			(void **)&op, processed_op, NULL);
	qp->qp_stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return enqueued_op;
}

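/*
 * Enqueue path: ops that share a session are batched, up to
 * SNOW3G_MAX_BURST, so the multi-buffer f8/f9 kernels can process
 * them in one call. An op with a bit-granular cipher length or offset
 * flushes the current batch and is handled individually.
 */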
static uint16_t
snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	struct snow3g_qp *qp = queue_pair;
	unsigned i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;

	for (i = 0; i < nb_ops; i++) {
		curr_c_op = ops[i];

		/* Set status as enqueued (not processed yet) by default. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = snow3g_get_session(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/* If length/offset is at bit-level, process this buffer alone. */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {
			/* Process the ops of the previous session. */
			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch ops that share the same session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;
			/*
			 * When there are enough ops to process in a batch,
			 * process them, and start a new batch.
			 */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size, &enqueued_ops);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Different session, process the ops
			 * of the previous session.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size, &enqueued_ops);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	if (burst_size != 0) {
		/* Process the crypto ops of the last session. */
		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size, &enqueued_ops);
	}

	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
	return enqueued_ops;
}

static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **c_ops, uint16_t nb_ops)
{
	struct snow3g_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
			(void **)c_ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

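/*
 * Virtual device plumbing: probe parses the vdev arguments and calls
 * create, which allocates the cryptodev and hooks up the burst
 * functions; SSE4.1 is required by the underlying SNOW 3G kernels.
 */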
static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);

static int
cryptodev_snow3g_create(const char *name,
			struct rte_vdev_device *vdev,
			struct rte_crypto_vdev_init_params *init_params)
{
	struct rte_cryptodev *dev;
	struct snow3g_private *internals;
	uint64_t cpu_flags = 0;

	if (init_params->name[0] == '\0')
		snprintf(init_params->name, sizeof(init_params->name),
				"%s", name);

	/* Check CPU for supported vector instruction set */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
	else {
		SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
		return -EFAULT;
	}

	dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
			sizeof(struct snow3g_private), init_params->socket_id);
	if (dev == NULL) {
		SNOW3G_LOG_ERR("failed to create cryptodev vdev");
		goto init_error;
	}

	dev->dev_type = RTE_CRYPTODEV_SNOW3G_PMD;
	dev->dev_ops = rte_snow3g_pmd_ops;

	/* Register RX/TX burst functions for data path. */
	dev->dequeue_burst = snow3g_pmd_dequeue_burst;
	dev->enqueue_burst = snow3g_pmd_enqueue_burst;

	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			cpu_flags;

	internals = dev->data->dev_private;

	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
	internals->max_nb_sessions = init_params->max_nb_sessions;

	return 0;
init_error:
	SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
			init_params->name);

	cryptodev_snow3g_remove(vdev);
	return -EFAULT;
}

static int
cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
{
	struct rte_crypto_vdev_init_params init_params = {
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
		rte_socket_id(),
		{0}
	};
	const char *name;
	const char *input_args;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);

	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
			init_params.socket_id);
	if (init_params.name[0] != '\0')
		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
			init_params.name);
	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
			init_params.max_nb_queue_pairs);
	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
			init_params.max_nb_sessions);

	return cryptodev_snow3g_create(name, vdev, &init_params);
}

static int
cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
			" on numa socket %u\n",
			name, rte_socket_id());

	return 0;
}

static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = cryptodev_snow3g_probe,
	.remove = cryptodev_snow3g_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");