/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define DEF_PKT_SIZE_THRESHOLD		(0xffffff80)
#define SLAVE_IDX_SWITCH_MASK		(0x01)
#define PRIMARY_SLAVE_IDX		0
#define SECONDARY_SLAVE_IDX		1
#define NB_PKT_SIZE_SLAVES		2

/** pkt size based scheduler context */
struct psd_scheduler_ctx {
	uint32_t threshold;
};

/** pkt size based scheduler queue pair context */
struct psd_scheduler_qp_ctx {
	struct scheduler_slave primary_slave;
	struct scheduler_slave secondary_slave;
	uint32_t threshold;
	uint8_t deq_idx;
} __rte_cache_aligned;

/** scheduling operation variables' wrapping */
struct psd_schedule_op {
	uint8_t slave_idx;
	uint16_t pos;
};
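
/* Worked example of the classification used in schedule_enqueue() below:
 * with the default 128-byte threshold (mask 0xffffff80), a 64-byte job
 * has no mask bit set, so !(job_len & mask) == 1 and the op is staged
 * for the secondary slave; a 1500-byte job yields index 0, the primary.
 */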

static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct scheduler_qp_ctx *qp_ctx = qp;
	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
			psd_qp_ctx->primary_slave.nb_inflight_cops,
			psd_qp_ctx->secondary_slave.nb_inflight_cops
	};
	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
	};
	struct psd_schedule_op *p_enq_op;
	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
	uint32_t job_len;

	if (unlikely(nb_ops == 0))
		return 0;

	for (i = 0; i < nb_ops && i < 4; i++) {
		rte_prefetch0(ops[i]->sym);
		rte_prefetch0(ops[i]->sym->session);
	}

	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		rte_prefetch0(ops[i + 4]->sym);
		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym);
		rte_prefetch0(ops[i + 7]->sym->session);

		/* job_len is initialized to the cipher data length; if that
		 * is 0, the auth data length is used instead
		 */
		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		/* decide the target op based on the job length */
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		/* stop scheduling cops before the queue is full; this
		 * prevents a failed enqueue
		 */
		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		p_enq_op->pos++;

		job_len = ops[i+1]->sym->cipher.data.length;
		job_len += (ops[i+1]->sym->cipher.data.length == 0) *
				ops[i+1]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
		p_enq_op->pos++;

		job_len = ops[i+2]->sym->cipher.data.length;
		job_len += (ops[i+2]->sym->cipher.data.length == 0) *
				ops[i+2]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
		p_enq_op->pos++;

		job_len = ops[i+3]->sym->cipher.data.length;
		job_len += (ops[i+3]->sym->cipher.data.length == 0) *
				ops[i+3]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs) {
			i = nb_ops;
			break;
		}

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
		p_enq_op->pos++;
	}

	/* handle the remaining ops not covered by the unrolled loop */
	for (; i < nb_ops; i++) {
		job_len = ops[i]->sym->cipher.data.length;
		job_len += (ops[i]->sym->cipher.data.length == 0) *
				ops[i]->sym->auth.data.length;
		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];

		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
				qp_ctx->max_nb_objs)
			break;

		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
		p_enq_op->pos++;
	}

	processed_ops_pri = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->primary_slave.dev_id,
			psd_qp_ctx->primary_slave.qp_id,
			sched_ops[PRIMARY_SLAVE_IDX],
			enq_ops[PRIMARY_SLAVE_IDX].pos);
	/* enqueue shall not fail as the slave queue is monitored */
	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);

	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;

	processed_ops_sec = rte_cryptodev_enqueue_burst(
			psd_qp_ctx->secondary_slave.dev_id,
			psd_qp_ctx->secondary_slave.qp_id,
			sched_ops[SECONDARY_SLAVE_IDX],
			enq_ops[SECONDARY_SLAVE_IDX].pos);
	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);

	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;

	return processed_ops_pri + processed_ops_sec;
}
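
/* Note on the RTE_ASSERTs above: each classification step refuses to stage
 * an op once pos + in_flight_ops would reach max_nb_objs, so a slave queue
 * pair is never offered more ops than it has room for and the burst
 * enqueue is not expected to return short.
 */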

static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
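
/* The ordered variant first caps the burst with
 * get_max_enqueue_order_count(), presumably to the free space left in
 * order_ring, so that scheduler_order_insert() can record every op that
 * was actually enqueued and the dequeue side can drain them in order.
 */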

static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct psd_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
	uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;

	if (slave->nb_inflight_cops) {
		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);
		slave->nb_inflight_cops -= nb_deq_ops_pri;
	}

	/* switch the dequeue index between the two slaves */
	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;

	if (nb_deq_ops_pri == nb_ops)
		return nb_deq_ops_pri;

	slave = slaves[qp_ctx->deq_idx];

	if (slave->nb_inflight_cops) {
		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
				slave->qp_id, &ops[nb_deq_ops_pri],
				nb_ops - nb_deq_ops_pri);
		slave->nb_inflight_cops -= nb_deq_ops_sec;

		if (!slave->nb_inflight_cops)
			qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
					SLAVE_IDX_SWITCH_MASK;
	}

	return nb_deq_ops_pri + nb_deq_ops_sec;
}
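
/* deq_idx is always 0 or 1, so (~deq_idx) & SLAVE_IDX_SWITCH_MASK simply
 * flips it. Successive dequeue bursts therefore start on alternating
 * slaves, and a slave that has just been fully drained gives up its next
 * turn, which keeps one busy slave from starving the other.
 */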

static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}

static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
	uint16_t i;

	/* the packet size based scheduler requires at least two slaves */
	if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
		CR_SCHED_LOG(ERR, "not enough slaves to start");
		return -1;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct psd_scheduler_qp_ctx *ps_qp_ctx =
				qp_ctx->private_qp_ctx;

		ps_qp_ctx->primary_slave.dev_id =
				sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
		ps_qp_ctx->primary_slave.qp_id = i;
		ps_qp_ctx->primary_slave.nb_inflight_cops = 0;

		ps_qp_ctx->secondary_slave.dev_id =
				sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
		ps_qp_ctx->secondary_slave.qp_id = i;
		ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;

		ps_qp_ctx->threshold = psd_ctx->threshold;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = &schedule_enqueue_ordering;
		dev->dequeue_burst = &schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = &schedule_enqueue;
		dev->dequeue_burst = &schedule_dequeue;
	}

	return 0;
}

static int
scheduler_stop(struct rte_cryptodev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
		struct psd_scheduler_qp_ctx *ps_qp_ctx =
				qp_ctx->private_qp_ctx;

		if (ps_qp_ctx->primary_slave.nb_inflight_cops +
				ps_qp_ctx->secondary_slave.nb_inflight_cops) {
			CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
			return -1;
		}
	}

	return 0;
}

static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct psd_scheduler_qp_ctx *ps_qp_ctx;

	ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
			rte_socket_id());
	if (!ps_qp_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	struct psd_scheduler_ctx *psd_ctx;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
			rte_socket_id());
	if (!psd_ctx) {
		CR_SCHED_LOG(ERR, "failed to allocate memory");
		return -ENOMEM;
	}

	psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;

	sched_ctx->private_ctx = (void *)psd_ctx;

	return 0;
}

static int
scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
		void *option)
{
	struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
			dev->data->dev_private)->private_ctx;
	uint32_t threshold;

	if ((enum rte_cryptodev_schedule_option_type)option_type !=
			CDEV_SCHED_OPTION_THRESHOLD) {
		CR_SCHED_LOG(ERR, "Option not supported");
		return -EINVAL;
	}

	threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
			option)->threshold;
	if (!rte_is_power_of_2(threshold)) {
		CR_SCHED_LOG(ERR, "Threshold is not power of 2");
		return -EINVAL;
	}

	/* store the threshold in mask form: ~(threshold - 1) */
	psd_ctx->threshold = ~(threshold - 1);

	return 0;
}

static int
scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
		void *option)
{
	struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
			dev->data->dev_private)->private_ctx;
	struct rte_cryptodev_scheduler_threshold_option *threshold_option;

	if ((enum rte_cryptodev_schedule_option_type)option_type !=
			CDEV_SCHED_OPTION_THRESHOLD) {
		CR_SCHED_LOG(ERR, "Option not supported");
		return -EINVAL;
	}

	threshold_option = option;
	/* recover the byte threshold from the stored mask */
	threshold_option->threshold = (~psd_ctx->threshold) + 1;

	return 0;
}
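
/*
 * Usage sketch (not part of this driver): an application would typically
 * select this mode and tune the threshold through the public scheduler
 * API roughly as follows; "scheduler_id" stands in for the application's
 * scheduler crypto device id.
 *
 *	struct rte_cryptodev_scheduler_threshold_option opt = {
 *		.threshold = 256,	(must be a power of 2)
 *	};
 *
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_PKT_SIZE_DISTR);
 *	rte_cryptodev_scheduler_option_set(scheduler_id,
 *			CDEV_SCHED_OPTION_THRESHOLD, (void *)&opt);
 *
 * With threshold = 256 the stored mask becomes ~(256 - 1) = 0xffffff00,
 * and scheduler_option_get() recovers 256 via (~mask) + 1.
 */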

static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	scheduler_option_set,
	scheduler_option_get
};

static struct rte_cryptodev_scheduler psd_scheduler = {
		.name = "packet-size-based-scheduler",
		.description = "scheduler which will distribute crypto op "
				"burst based on the packet size",
		.mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
		.ops = &scheduler_ps_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr =
		&psd_scheduler;