/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_string_fns.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* BBDev library logging ID */
static int bbdev_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event;  /* Interrupt event type */
	uint32_t active;  /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find the data allocated for the device or, if none is found, return the
 * first unused bbdev data entry. If all structures are in use and none is
 * used by the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	rte_atomic16_inc(&bbdev->data->process_cnt);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = strlcpy(bbdev->data->name, name, RTE_BBDEV_NAME_MAX_LEN);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
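
/*
 * Usage sketch (editorial addition, not part of the library): how a
 * virtual-device PMD's probe hook would typically call rte_bbdev_allocate().
 * The ops table name and the private-data handling are hypothetical, and the
 * <rte_bus_vdev.h> helpers are assumptions; a real PMD also installs its
 * enqueue/dequeue function pointers here.
 */
#if 0	/* illustrative only, not compiled */
#include <rte_bus_vdev.h>

static int
example_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;

	/* Reserve a device slot and a shared rte_bbdev_data entry */
	bbdev = rte_bbdev_allocate(rte_vdev_device_name(vdev));
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->dev_ops = &example_dev_ops;	/* hypothetical ops table */
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = rte_socket_id();
	return 0;
}
#endif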

int __rte_experimental
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t __rte_experimental
rte_bbdev_count(void)
{
	return num_devs;
}

bool __rte_experimental
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}
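
/*
 * Usage sketch (editorial addition): iterating over all attached devices
 * with rte_bbdev_find_next(). Passing -1 (wrapping to UINT16_MAX) makes the
 * first call start the scan at device 0; rte_bbdev.h wraps this pattern in
 * the RTE_BBDEV_FOREACH() convenience macro.
 */
#if 0	/* illustrative only, not compiled */
static void
example_list_devices(void)
{
	uint16_t dev_id;

	for (dev_id = rte_bbdev_find_next(-1); dev_id < RTE_BBDEV_MAX_DEVS;
			dev_id = rte_bbdev_find_next(dev_id))
		rte_bbdev_log(INFO, "bbdev %u of %u is attached", dev_id,
				rte_bbdev_count());
}
#endif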

int __rte_experimental
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}

int __rte_experimental
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
				conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
				conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Device %u queue %u setup failed", dev_id,
				queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
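
/*
 * Usage sketch (editorial addition): the configuration sequence an
 * application runs before enqueueing ops. The queue count and op type are
 * hypothetical; real values should respect the rte_bbdev_info_get() limits
 * checked above (max_num_queues, queue_size_lim, capabilities).
 */
#if 0	/* illustrative only, not compiled */
static int
example_configure(uint16_t dev_id, uint16_t num_queues)
{
	struct rte_bbdev_info info;
	struct rte_bbdev_queue_conf qconf;
	uint16_t q_id;

	if (rte_bbdev_info_get(dev_id, &info) != 0 ||
			num_queues > info.drv.max_num_queues)
		return -EINVAL;

	if (rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id()) != 0)
		return -EINVAL;

	/* Start from the driver defaults, then pin the operation type */
	qconf = info.drv.default_queue_conf;
	qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;

	for (q_id = 0; q_id < num_queues; q_id++)
		if (rte_bbdev_queue_configure(dev_id, q_id, &qconf) != 0)
			return -EINVAL;

	return rte_bbdev_start(dev_id);
}
#endif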

int __rte_experimental
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int __rte_experimental
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}
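
/*
 * Usage sketch (editorial addition): polling and clearing counters. When a
 * driver supplies no stats_get/stats_reset op, the calls above fall back to
 * summing or zeroing the per-queue queue_stats. Needs <stdio.h> and
 * <inttypes.h> for printf/PRIu64.
 */
#if 0	/* illustrative only, not compiled */
static void
example_dump_stats(uint16_t dev_id)
{
	struct rte_bbdev_stats stats;

	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
		printf("enq=%" PRIu64 " (err %" PRIu64 ") deq=%" PRIu64
				" (err %" PRIu64 ")\n",
				stats.enqueued_count, stats.enqueue_err_count,
				stats.dequeued_count, stats.dequeue_err_count);
	rte_bbdev_stats_reset(dev_id);
}
#endif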

int __rte_experimental
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->bus = rte_bus_find_by_device(dev->device);
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}
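
/*
 * Usage sketch (editorial addition): walking the capability list that
 * rte_bbdev_info_get() returns in info.drv. The capabilities array is
 * terminated by an RTE_BBDEV_OP_NONE entry, the same sentinel that
 * rte_bbdev_queue_configure() scans for above.
 */
#if 0	/* illustrative only, not compiled */
static void
example_list_capabilities(uint16_t dev_id)
{
	struct rte_bbdev_info info;
	const struct rte_bbdev_op_cap *cap;

	if (rte_bbdev_info_get(dev_id, &info) != 0)
		return;
	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE;
			cap++)
		rte_bbdev_log(INFO, "%s supports %s", info.dev_name,
				rte_bbdev_op_type_str(cap->type));
}
#endif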

int __rte_experimental
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool * __rte_experimental
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
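
/*
 * Usage sketch (editorial addition): allocating encode ops from a pool and
 * pushing them through a started queue. The burst/pool sizes are hypothetical
 * and the mbuf setup is elided; rte_bbdev_enqueue_enc_ops() and
 * rte_bbdev_dequeue_enc_ops() are the inline wrappers in rte_bbdev.h that
 * index the global rte_bbdev_devices[] array directly.
 */
#if 0	/* illustrative only, not compiled */
#define EXAMPLE_BURST_SIZE 32

static int
example_run_enc_burst(uint16_t dev_id, uint16_t q_id)
{
	struct rte_mempool *ops_pool;
	struct rte_bbdev_enc_op *ops[EXAMPLE_BURST_SIZE];
	uint16_t num_enq, num_deq = 0;

	ops_pool = rte_bbdev_op_pool_create("enc_op_pool",
			RTE_BBDEV_OP_TURBO_ENC, 2048, 128, rte_socket_id());
	if (ops_pool == NULL)
		return -ENOMEM;
	if (rte_bbdev_enc_op_alloc_bulk(ops_pool, ops,
			EXAMPLE_BURST_SIZE) != 0)
		return -ENOMEM;

	/* ... attach input/output mbufs to each ops[i]->turbo_enc here ... */

	num_enq = rte_bbdev_enqueue_enc_ops(dev_id, q_id, ops,
			EXAMPLE_BURST_SIZE);
	while (num_deq < num_enq)
		num_deq += rte_bbdev_dequeue_enc_ops(dev_id, q_id,
				&ops[num_deq], num_enq - num_deq);

	rte_bbdev_enc_op_free_bulk(ops, num_enq);
	return 0;
}
#endif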

int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}
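
/*
 * Usage sketch (editorial addition): registering an application handler for
 * error events. The handler body is hypothetical; note that
 * rte_bbdev_pmd_callback_process() below releases the callback lock while a
 * handler runs, so handlers may execute concurrently with other bbdev calls.
 */
#if 0	/* illustrative only, not compiled */
static void
example_event_handler(uint16_t dev_id, enum rte_bbdev_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	rte_bbdev_log(ERR, "bbdev %u raised event %u", dev_id, event);
}

static int
example_register(uint16_t dev_id)
{
	return rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
			example_event_handler, NULL);
}
#endif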

void __rte_experimental
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
		enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (!intr_handle || !intr_handle->intr_vec) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = intr_handle->intr_vec[queue_id];
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
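
/*
 * Usage sketch (editorial addition): arming a queue interrupt on the calling
 * thread's epoll fd and sleeping until the PMD signals it.
 * RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD and rte_epoll_wait() come from the
 * EAL interrupt API (rte_interrupts.h); the 1000 ms timeout is arbitrary.
 */
#if 0	/* illustrative only, not compiled */
static int
example_wait_on_queue(uint16_t dev_id, uint16_t q_id)
{
	struct rte_epoll_event ev;

	if (rte_bbdev_queue_intr_enable(dev_id, q_id) != 0 ||
			rte_bbdev_queue_intr_ctl(dev_id, q_id,
				RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD,
				NULL) != 0)
		return -ENOTSUP;

	/* Block until the queue interrupt fires or the timeout elapses */
	return rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 1000);
}
#endif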

const char * __rte_experimental
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

RTE_INIT(rte_bbdev_init_log)
{
	bbdev_logtype = rte_log_register("lib.bbdev");
	if (bbdev_logtype >= 0)
		rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
}