/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
5 #include <rte_cryptodev.h>
6 #include <rte_malloc.h>
8 #include "rte_cryptodev_scheduler_operations.h"
9 #include "scheduler_pmd_private.h"
11 struct rr_scheduler_qp_ctx
{
12 struct scheduler_slave slaves
[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
];
15 uint32_t last_enq_slave_idx
;
16 uint32_t last_deq_slave_idx
;
20 schedule_enqueue(void *qp
, struct rte_crypto_op
**ops
, uint16_t nb_ops
)
22 struct rr_scheduler_qp_ctx
*rr_qp_ctx
=
23 ((struct scheduler_qp_ctx
*)qp
)->private_qp_ctx
;
24 uint32_t slave_idx
= rr_qp_ctx
->last_enq_slave_idx
;
25 struct scheduler_slave
*slave
= &rr_qp_ctx
->slaves
[slave_idx
];
26 uint16_t i
, processed_ops
;
28 if (unlikely(nb_ops
== 0))
31 for (i
= 0; i
< nb_ops
&& i
< 4; i
++)
32 rte_prefetch0(ops
[i
]->sym
->session
);
34 processed_ops
= rte_cryptodev_enqueue_burst(slave
->dev_id
,
35 slave
->qp_id
, ops
, nb_ops
);
37 slave
->nb_inflight_cops
+= processed_ops
;
39 rr_qp_ctx
->last_enq_slave_idx
+= 1;
40 rr_qp_ctx
->last_enq_slave_idx
%= rr_qp_ctx
->nb_slaves
;
46 schedule_enqueue_ordering(void *qp
, struct rte_crypto_op
**ops
,
49 struct rte_ring
*order_ring
=
50 ((struct scheduler_qp_ctx
*)qp
)->order_ring
;
51 uint16_t nb_ops_to_enq
= get_max_enqueue_order_count(order_ring
,
53 uint16_t nb_ops_enqd
= schedule_enqueue(qp
, ops
,
56 scheduler_order_insert(order_ring
, ops
, nb_ops_enqd
);
63 schedule_dequeue(void *qp
, struct rte_crypto_op
**ops
, uint16_t nb_ops
)
65 struct rr_scheduler_qp_ctx
*rr_qp_ctx
=
66 ((struct scheduler_qp_ctx
*)qp
)->private_qp_ctx
;
67 struct scheduler_slave
*slave
;
68 uint32_t last_slave_idx
= rr_qp_ctx
->last_deq_slave_idx
;
71 if (unlikely(rr_qp_ctx
->slaves
[last_slave_idx
].nb_inflight_cops
== 0)) {
75 if (unlikely(last_slave_idx
>= rr_qp_ctx
->nb_slaves
))
77 /* looped back, means no inflight cops in the queue */
78 if (last_slave_idx
== rr_qp_ctx
->last_deq_slave_idx
)
80 } while (rr_qp_ctx
->slaves
[last_slave_idx
].nb_inflight_cops
84 slave
= &rr_qp_ctx
->slaves
[last_slave_idx
];
86 nb_deq_ops
= rte_cryptodev_dequeue_burst(slave
->dev_id
,
87 slave
->qp_id
, ops
, nb_ops
);
90 last_slave_idx
%= rr_qp_ctx
->nb_slaves
;
92 rr_qp_ctx
->last_deq_slave_idx
= last_slave_idx
;
94 slave
->nb_inflight_cops
-= nb_deq_ops
;
100 schedule_dequeue_ordering(void *qp
, struct rte_crypto_op
**ops
,
103 struct rte_ring
*order_ring
=
104 ((struct scheduler_qp_ctx
*)qp
)->order_ring
;
106 schedule_dequeue(qp
, ops
, nb_ops
);
108 return scheduler_order_drain(order_ring
, ops
, nb_ops
);
112 slave_attach(__rte_unused
struct rte_cryptodev
*dev
,
113 __rte_unused
uint8_t slave_id
)
119 slave_detach(__rte_unused
struct rte_cryptodev
*dev
,
120 __rte_unused
uint8_t slave_id
)
126 scheduler_start(struct rte_cryptodev
*dev
)
128 struct scheduler_ctx
*sched_ctx
= dev
->data
->dev_private
;
131 if (sched_ctx
->reordering_enabled
) {
132 dev
->enqueue_burst
= &schedule_enqueue_ordering
;
133 dev
->dequeue_burst
= &schedule_dequeue_ordering
;
135 dev
->enqueue_burst
= &schedule_enqueue
;
136 dev
->dequeue_burst
= &schedule_dequeue
;
139 for (i
= 0; i
< dev
->data
->nb_queue_pairs
; i
++) {
140 struct scheduler_qp_ctx
*qp_ctx
= dev
->data
->queue_pairs
[i
];
141 struct rr_scheduler_qp_ctx
*rr_qp_ctx
=
142 qp_ctx
->private_qp_ctx
;
145 memset(rr_qp_ctx
->slaves
, 0,
146 RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
*
147 sizeof(struct scheduler_slave
));
148 for (j
= 0; j
< sched_ctx
->nb_slaves
; j
++) {
149 rr_qp_ctx
->slaves
[j
].dev_id
=
150 sched_ctx
->slaves
[j
].dev_id
;
151 rr_qp_ctx
->slaves
[j
].qp_id
= i
;
154 rr_qp_ctx
->nb_slaves
= sched_ctx
->nb_slaves
;
156 rr_qp_ctx
->last_enq_slave_idx
= 0;
157 rr_qp_ctx
->last_deq_slave_idx
= 0;
164 scheduler_stop(__rte_unused
struct rte_cryptodev
*dev
)
170 scheduler_config_qp(struct rte_cryptodev
*dev
, uint16_t qp_id
)
172 struct scheduler_qp_ctx
*qp_ctx
= dev
->data
->queue_pairs
[qp_id
];
173 struct rr_scheduler_qp_ctx
*rr_qp_ctx
;
175 rr_qp_ctx
= rte_zmalloc_socket(NULL
, sizeof(*rr_qp_ctx
), 0,
178 CR_SCHED_LOG(ERR
, "failed allocate memory for private queue pair");
182 qp_ctx
->private_qp_ctx
= (void *)rr_qp_ctx
;
188 scheduler_create_private_ctx(__rte_unused
struct rte_cryptodev
*dev
)
193 static struct rte_cryptodev_scheduler_ops scheduler_rr_ops
= {
199 scheduler_create_private_ctx
,
200 NULL
, /* option_set */
201 NULL
/* option_get */
204 static struct rte_cryptodev_scheduler scheduler
= {
205 .name
= "roundrobin-scheduler",
206 .description
= "scheduler which will round robin burst across "
207 "slave crypto devices",
208 .mode
= CDEV_SCHED_MODE_ROUNDROBIN
,
209 .ops
= &scheduler_rr_ops
212 struct rte_cryptodev_scheduler
*crypto_scheduler_roundrobin
= &scheduler
;