/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
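
/*
 * Fail-over scheduling mode: each burst is enqueued to the primary
 * slave first, and only the operations the primary device could not
 * accept are passed on to the secondary slave.
 *
 * The mode is normally selected through the scheduler vdev arguments,
 * for example (device names here are purely illustrative):
 *
 *   --vdev "crypto_scheduler,slave=crypto_aesni_mb0,slave=crypto_qat0,mode=fail-over"
 */
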
#define PRIMARY_SLAVE_IDX       0
#define SECONDARY_SLAVE_IDX     1
#define NB_FAILOVER_SLAVES      2
#define SLAVE_SWITCH_MASK       (0x01)
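
/*
 * Per-queue-pair context: private copies of the primary and secondary
 * slave descriptors, plus deq_idx, a one-bit index selecting which
 * slave is polled first on dequeue (toggled via SLAVE_SWITCH_MASK).
 */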
struct fo_scheduler_qp_ctx {
        struct scheduler_slave primary_slave;
        struct scheduler_slave secondary_slave;

        uint8_t deq_idx;
};
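
/*
 * Enqueue a burst on a single slave. The first few sessions are
 * prefetched to hide memory latency; the slave's in-flight counter is
 * increased by however many ops the device actually accepted.
 */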
static __rte_always_inline uint16_t
failover_slave_enqueue(struct scheduler_slave *slave,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint16_t i, processed_ops;

        for (i = 0; i < nb_ops && i < 4; i++)
                rte_prefetch0(ops[i]->sym->session);

        processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);
        slave->nb_inflight_cops += processed_ops;

        return processed_ops;
}
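
/*
 * Fail-over enqueue: try the primary slave first; any ops the primary
 * rejects are retried on the secondary slave.
 */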
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct fo_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        uint16_t enqueued_ops;

        if (unlikely(nb_ops == 0))
                return 0;

        enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
                        ops, nb_ops);

        if (enqueued_ops < nb_ops)
                enqueued_ops += failover_slave_enqueue(
                                &qp_ctx->secondary_slave,
                                &ops[enqueued_ops],
                                nb_ops - enqueued_ops);

        return enqueued_ops;
}
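
/*
 * Ordering-aware enqueue: caps the burst at what the order ring can
 * still absorb, enqueues as above, then records the accepted ops in
 * the ring so dequeue can restore the original order.
 */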
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}
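
/*
 * Fail-over dequeue: poll the slave selected by deq_idx, flip deq_idx
 * so the other slave goes first next time, and top up from the second
 * slave if the burst is not yet full. Only slaves with in-flight ops
 * are polled.
 */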
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct fo_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
                        &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
        struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
        uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;

        if (slave->nb_inflight_cops) {
                nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                                slave->qp_id, ops, nb_ops);
                slave->nb_inflight_cops -= nb_deq_ops;
        }

        qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;

        if (nb_deq_ops == nb_ops)
                return nb_deq_ops;

        slave = slaves[qp_ctx->deq_idx];

        if (slave->nb_inflight_cops) {
                nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
                                slave->qp_id, &ops[nb_deq_ops],
                                nb_ops - nb_deq_ops);
                slave->nb_inflight_cops -= nb_deq_ops2;
        }

        return nb_deq_ops + nb_deq_ops2;
}
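
/*
 * Ordering-aware dequeue: pull completions from the slaves, then drain
 * the order ring so ops are returned in their original enqueue order.
 */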
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}
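
/*
 * Slave attach/detach need no per-mode work in fail-over mode; the
 * stubs below exist only to satisfy the scheduler ops table.
 */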
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}
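
/*
 * Start: require at least two slaves (only the first two are used),
 * install the plain or reordering burst functions, and give every
 * queue pair its own copy of the primary and secondary slave
 * descriptors.
 */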
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint16_t i;

        if (sched_ctx->nb_slaves < 2) {
                CR_SCHED_LOG(ERR, "Number of slaves shall be no less than 2");
                return -ENOMEM;
        }

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = schedule_enqueue_ordering;
                dev->dequeue_burst = schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = schedule_enqueue;
                dev->dequeue_burst = schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct fo_scheduler_qp_ctx *qp_ctx =
                                ((struct scheduler_qp_ctx *)
                                        dev->data->queue_pairs[i])->private_qp_ctx;

                rte_memcpy(&qp_ctx->primary_slave,
                                &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
                                sizeof(struct scheduler_slave));
                rte_memcpy(&qp_ctx->secondary_slave,
                                &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
                                sizeof(struct scheduler_slave));
        }

        return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}
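
/*
 * Queue-pair setup: allocate the per-qp fail-over context on the
 * caller's NUMA socket and hook it into the generic qp context.
 */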
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct fo_scheduler_qp_ctx *fo_qp_ctx;

        fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
                        rte_socket_id());
        if (!fo_qp_ctx) {
                CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

        return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}
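
/*
 * Ops table, in the order rte_cryptodev_scheduler_ops expects:
 * attach, detach, start, stop, qp config, private-ctx create, then the
 * optional option_set/option_get handlers (unused by this mode).
 */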
static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

static struct rte_cryptodev_scheduler fo_scheduler = {
                .name = "failover-scheduler",
                .description = "scheduler which enqueues to the primary slave, "
                                "and enqueues to the secondary slave only "
                                "when enqueueing to the primary fails",
                .mode = CDEV_SCHED_MODE_FAILOVER,
                .ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;