/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_SLAVE_BUFF_SIZE	(256)

extern int scheduler_logtype_driver;

#define CR_SCHED_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)

struct scheduler_slave {
	uint8_t dev_id;
	/**< cryptodev device id of the slave */
	uint16_t qp_id;
	/**< queue pair id on the slave device */
	uint32_t nb_inflight_cops;
	/**< number of crypto ops currently in flight on this slave */

	uint8_t driver_id;
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	uint32_t nb_capabilities;

	uint32_t max_nb_queue_pairs;

	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;

	enum rte_cryptodev_scheduler_mode mode;

	struct rte_cryptodev_scheduler_ops ops;

	uint8_t reordering_enabled;

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];

	uint16_t wc_pool[RTE_MAX_LCORE];
	/**< worker core (lcore) pool used by the multi-core mode */
	uint16_t nb_wc;

	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	int nb_init_slaves;
} __rte_cache_aligned;

struct scheduler_qp_ctx {
	void *private_qp_ctx;

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
	uint32_t seqn;
} __rte_cache_aligned;

extern uint8_t cryptodev_scheduler_driver_id;

/* Clamp an enqueue burst to the free space left in the order ring. */
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

/* Record a burst in the order ring at enqueue time (single-producer). */
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}

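/*
 * Illustrative sketch, not part of the upstream header, of how the two
 * helpers above combine on the enqueue side when reordering is enabled:
 * the burst is first clamped to the order ring's free space, distributed
 * to slaves, then recorded in the order ring. The enq_to_slaves() helper
 * is hypothetical.
 *
 *	static uint16_t
 *	sketch_enqueue_ordered(struct scheduler_qp_ctx *qp_ctx,
 *			struct rte_crypto_op **ops, uint16_t nb_ops)
 *	{
 *		uint16_t nb = get_max_enqueue_order_count(qp_ctx->order_ring,
 *				nb_ops);
 *
 *		nb = enq_to_slaves(qp_ctx, ops, nb);	// hypothetical
 *		scheduler_order_insert(qp_ctx->order_ring, ops, nb);
 *		return nb;
 *	}
 */
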
/*
 * Peek at the object at offset pos from the ring's consumer head without
 * dequeuing it; relies on the ring storage starting immediately after
 * struct rte_ring.
 */
#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
	struct rte_crypto_op **ring = (void *)&order_ring[1]; \
	op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
} while (0)

/*
 * Dequeue the longest in-order prefix of already-processed ops from the
 * order ring, so callers see completions in original enqueue order.
 */
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs = rte_ring_count(order_ring);
	uint32_t nb_ops_to_deq = 0;
	uint32_t nb_ops_deqd = 0;

	if (nb_objs > nb_ops)
		nb_objs = nb_ops;

	/* Stop at the first op a slave has not finished processing yet. */
	while (nb_ops_to_deq < nb_objs) {
		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
		nb_ops_to_deq++;
	}

	if (nb_ops_to_deq)
		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
				(void **)ops, nb_ops_to_deq, NULL);

	return nb_ops_deqd;
}
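
/*
 * Illustrative sketch, not part of the upstream header, of the matching
 * dequeue side: slave completions update op->status in place, after which
 * the drain helper releases only the in-order processed prefix. The
 * deq_from_slaves() helper is hypothetical.
 *
 *	static uint16_t
 *	sketch_dequeue_ordered(struct scheduler_qp_ctx *qp_ctx,
 *			struct rte_crypto_op **ops, uint16_t nb_ops)
 *	{
 *		deq_from_slaves(qp_ctx);	// hypothetical: flips op->status
 *		return scheduler_order_drain(qp_ctx->order_ring, ops, nb_ops);
 *	}
 */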
/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */