ceph/src/spdk/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

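/* Per queue pair context: the slaves served by this queue pair and the
 * round-robin cursors used for enqueue and dequeue.
 */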
struct rr_scheduler_qp_ctx {
        struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
        uint32_t nb_slaves;

        uint32_t last_enq_slave_idx;
        uint32_t last_deq_slave_idx;
};

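/* Enqueue a burst to the slave pointed at by the enqueue cursor, prefetching
 * the sessions of up to the first four ops, account for the ops now in flight
 * on that slave, then advance the cursor to the next slave.
 */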
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
        struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
        uint16_t i, processed_ops;

        if (unlikely(nb_ops == 0))
                return 0;

        for (i = 0; i < nb_ops && i < 4; i++)
                rte_prefetch0(ops[i]->sym->session);

        processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);

        slave->nb_inflight_cops += processed_ops;

        rr_qp_ctx->last_enq_slave_idx += 1;
        rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

        return processed_ops;
}

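/* Ordering-aware enqueue: cap the burst to what the order ring can still
 * track, enqueue through the round-robin path, then record the enqueued ops
 * in the order ring so they can later be drained in their original order.
 */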
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}


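/* Dequeue from the slave pointed at by the dequeue cursor. If that slave has
 * no ops in flight, scan forward for the next slave that does; if the scan
 * wraps back to the starting slave, nothing is in flight and the burst is
 * empty. After dequeuing, advance the cursor to the next slave.
 */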
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_slave *slave;
        uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
        uint16_t nb_deq_ops;

        if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
                do {
                        last_slave_idx += 1;

                        if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
                                last_slave_idx = 0;
                        /* looped back, means no inflight cops in the queue */
                        if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
                                return 0;
                } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
                                == 0);
        }

        slave = &rr_qp_ctx->slaves[last_slave_idx];

        nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);

        last_slave_idx += 1;
        last_slave_idx %= rr_qp_ctx->nb_slaves;

        rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

        slave->nb_inflight_cops -= nb_deq_ops;

        return nb_deq_ops;
}

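/* Ordering-aware dequeue: retire completed ops from the slaves (updating the
 * in-flight accounting), then drain the head of the order ring so the caller
 * receives ops in their original enqueue order, stopping at the first op that
 * is still unprocessed.
 */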
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

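/* The round-robin mode keeps no per-slave state of its own, so attach and
 * detach are no-ops that simply report success.
 */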
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

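/* Install the enqueue/dequeue burst functions (plain or ordering-aware,
 * depending on whether reordering is enabled) and snapshot the attached
 * slaves into every queue pair's round-robin context, resetting both cursors.
 */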
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint16_t i;

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct rr_scheduler_qp_ctx *rr_qp_ctx =
                                qp_ctx->private_qp_ctx;
                uint32_t j;

                memset(rr_qp_ctx->slaves, 0,
                                RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
                                sizeof(struct scheduler_slave));
                for (j = 0; j < sched_ctx->nb_slaves; j++) {
                        rr_qp_ctx->slaves[j].dev_id =
                                        sched_ctx->slaves[j].dev_id;
                        rr_qp_ctx->slaves[j].qp_id = i;
                }

                rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

                rr_qp_ctx->last_enq_slave_idx = 0;
                rr_qp_ctx->last_deq_slave_idx = 0;
        }

        return 0;
}

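/* No mode-specific state needs tearing down when the device stops. */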
static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

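/* Allocate a zero-initialised round-robin context for one queue pair on the
 * local NUMA socket and hook it into the generic queue pair context.
 */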
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct rr_scheduler_qp_ctx *rr_qp_ctx;

        rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
                        rte_socket_id());
        if (!rr_qp_ctx) {
                CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

        return 0;
}

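/* Round-robin keeps no device-level private context, so there is nothing to
 * create here.
 */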
static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

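/* Operation table exported to the scheduler PMD framework; option_set and
 * option_get are left NULL because this mode takes no mode-specific options.
 */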
static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

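/* Scheduler descriptor registered with the scheduler PMD; selecting
 * CDEV_SCHED_MODE_ROUNDROBIN binds the device to these ops.
 */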
static struct rte_cryptodev_scheduler scheduler = {
        .name = "roundrobin-scheduler",
        .description = "scheduler which will round robin bursts across "
                        "slave crypto devices",
        .mode = CDEV_SCHED_MODE_ROUNDROBIN,
        .ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
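
/*
 * Usage sketch (illustrative, not part of this file): an application that has
 * created a crypto scheduler vdev would typically select this mode with
 * rte_cryptodev_scheduler_mode_set(scheduler_dev_id,
 *                 CDEV_SCHED_MODE_ROUNDROBIN)
 * after attaching its slave devices and before starting the scheduler.
 */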