/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define PRIMARY_SLAVE_IDX	0
#define SECONDARY_SLAVE_IDX	1
#define NB_FAILOVER_SLAVES	2
#define SLAVE_SWITCH_MASK	(0x01)
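
/*
 * Per-queue-pair private context: copies of the primary and secondary
 * slaves, plus the index of the slave to be polled first on the next
 * dequeue (deq_idx alternates between the two).
 */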
struct fo_scheduler_qp_ctx {
	struct scheduler_slave primary_slave;
	struct scheduler_slave secondary_slave;

	uint8_t deq_idx;
};
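
/*
 * Enqueue a burst of ops to one slave. Each op's scheduler session is
 * swapped for the slave-specific session before the device enqueue; the
 * original sessions are saved so that any ops the slave rejects can be
 * restored and retried elsewhere.
 */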
static inline uint16_t __attribute__((always_inline))
failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t i, processed_ops;
	struct rte_cryptodev_sym_session *sessions[nb_ops];
	struct scheduler_session *sess0, *sess1, *sess2, *sess3;

	for (i = 0; i < nb_ops && i < 4; i++)
		rte_prefetch0(ops[i]->sym->session);
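
	/*
	 * Main loop: handle four ops per iteration, prefetching the
	 * sessions of the four ops that follow.
	 */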
	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
		rte_prefetch0(ops[i + 4]->sym->session);
		rte_prefetch0(ops[i + 5]->sym->session);
		rte_prefetch0(ops[i + 6]->sym->session);
		rte_prefetch0(ops[i + 7]->sym->session);

		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sess1 = (struct scheduler_session *)
				ops[i + 1]->sym->session->_private;
		sess2 = (struct scheduler_session *)
				ops[i + 2]->sym->session->_private;
		sess3 = (struct scheduler_session *)
				ops[i + 3]->sym->session->_private;

		sessions[i] = ops[i]->sym->session;
		sessions[i + 1] = ops[i + 1]->sym->session;
		sessions[i + 2] = ops[i + 2]->sym->session;
		sessions[i + 3] = ops[i + 3]->sym->session;

		ops[i]->sym->session = sess0->sessions[slave_idx];
		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
	}

	for (; i < nb_ops; i++) {
		sess0 = (struct scheduler_session *)
				ops[i]->sym->session->_private;
		sessions[i] = ops[i]->sym->session;
		ops[i]->sym->session = sess0->sessions[slave_idx];
	}
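
	/* Submit the burst and account for the ops now in flight. */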
	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
			slave->qp_id, ops, nb_ops);
	slave->nb_inflight_cops += processed_ops;
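
	/*
	 * Restore the original sessions of any ops the slave did not
	 * accept, so the caller can retry them on the other slave.
	 */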
	if (unlikely(processed_ops < nb_ops))
		for (i = processed_ops; i < nb_ops; i++)
			ops[i]->sym->session = sessions[i];

	return processed_ops;
}
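
/*
 * Burst enqueue entry point: try the primary slave first and push any
 * ops it rejects to the secondary slave.
 */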
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	uint16_t enqueued_ops;

	if (unlikely(nb_ops == 0))
		return 0;

	enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
			PRIMARY_SLAVE_IDX, ops, nb_ops);

	if (enqueued_ops < nb_ops)
		enqueued_ops += failover_slave_enqueue(
				&qp_ctx->secondary_slave,
				SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
				nb_ops - enqueued_ops);

	return enqueued_ops;
}
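
/*
 * Ordering-aware variant of the above: limit the burst to what the
 * reorder ring can absorb, then record the enqueued ops in the ring so
 * they can be delivered back in their original order.
 */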
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
			nb_ops_to_enq);

	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}
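
/*
 * Burst dequeue entry point: poll whichever slave's turn it is first,
 * then top the burst up from the other slave if there is still room.
 */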
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct fo_scheduler_qp_ctx *qp_ctx =
			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
	struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
	uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
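
	/* Only poll a slave that actually has ops in flight. */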
	if (slave->nb_inflight_cops) {
		nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
				slave->qp_id, ops, nb_ops);
		slave->nb_inflight_cops -= nb_deq_ops;
	}
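
	/* Alternate which slave is polled first on the next call. */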
	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;

	if (nb_deq_ops == nb_ops)
		return nb_deq_ops;

	slave = slaves[qp_ctx->deq_idx];

	if (slave->nb_inflight_cops) {
		nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
				slave->qp_id, &ops[nb_deq_ops],
				nb_ops - nb_deq_ops);
		slave->nb_inflight_cops -= nb_deq_ops2;
	}

	return nb_deq_ops + nb_deq_ops2;
}
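
/*
 * Ordering-aware dequeue: dequeue from the slaves as usual, but hand
 * ops back to the caller strictly in their original enqueue order by
 * draining the reorder ring.
 */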
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	schedule_dequeue(qp, ops, nb_ops);

	return scheduler_order_drain(order_ring, ops, nb_ops);
}
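
/* The failover mode keeps no per-slave state, so attach/detach are no-ops. */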
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id)
{
	return 0;
}
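
/*
 * Start the scheduler: pick the burst functions to match the reordering
 * setting and hand every queue pair its own copy of the first two
 * attached slaves.
 */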
static int
scheduler_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint16_t i;

	if (sched_ctx->nb_slaves < 2) {
		CS_LOG_ERR("Number of slaves shall be no less than 2");
		return -ENOMEM;
	}

	if (sched_ctx->reordering_enabled) {
		dev->enqueue_burst = schedule_enqueue_ordering;
		dev->dequeue_burst = schedule_dequeue_ordering;
	} else {
		dev->enqueue_burst = schedule_enqueue;
		dev->dequeue_burst = schedule_dequeue;
	}

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		struct fo_scheduler_qp_ctx *qp_ctx =
				((struct scheduler_qp_ctx *)
					dev->data->queue_pairs[i])->private_qp_ctx;

		rte_memcpy(&qp_ctx->primary_slave,
				&sched_ctx->slaves[PRIMARY_SLAVE_IDX],
				sizeof(struct scheduler_slave));
		rte_memcpy(&qp_ctx->secondary_slave,
				&sched_ctx->slaves[SECONDARY_SLAVE_IDX],
				sizeof(struct scheduler_slave));
	}

	return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
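
/* Allocate the per-queue-pair failover context on the local NUMA socket. */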
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct fo_scheduler_qp_ctx *fo_qp_ctx;

	fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
			rte_socket_id());
	if (!fo_qp_ctx) {
		CS_LOG_ERR("failed to allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

	return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
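
/* Operations table registered with the scheduler PMD for this mode. */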
struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
	slave_attach,
	slave_detach,
	scheduler_start,
	scheduler_stop,
	scheduler_config_qp,
	scheduler_create_private_ctx,
	NULL,	/* option_set */
	NULL	/* option_get */
};
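
/*
 * Descriptor for the failover mode. As a rough usage sketch (assuming a
 * scheduler PMD instance with at least two slaves already attached), an
 * application would select this mode with:
 *
 *	rte_cryptodev_scheduler_mode_set(scheduler_dev_id,
 *			CDEV_SCHED_MODE_FAILOVER);
 */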
struct rte_cryptodev_scheduler fo_scheduler = {
		.name = "failover-scheduler",
		.description = "scheduler which enqueues to the primary slave, "
				"and only enqueues to the secondary slave "
				"upon failing to enqueue to the primary",
		.mode = CDEV_SCHED_MODE_FAILOVER,
		.ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;