/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Intel Corporation.
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
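/*
 * Note: fdata (runtime state shared across lcores) and cdata (the parsed
 * configuration) are globals provided by pipeline_common.h; they are not
 * defined in this file.
 */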
static __rte_always_inline int
worker_generic(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();
	while (!fdata->done) {

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		/* Skip the fast path if this lcore is not a worker core. */
		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				&ev, 1, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received++;
		/* The first worker stage does classification */
		if (ev.queue_id == cdata.qid[0])
			ev.flow_id = ev.mbuf->hash.rss
						% cdata.num_fids;

		/* Forward the event to the next stage in the pipeline. */
		ev.queue_id = cdata.next_qid[ev.queue_id];
		ev.op = RTE_EVENT_OP_FORWARD;
		ev.sched_type = cdata.queue_type;

		work();

		/* Retry until the event is accepted (handles backpressure). */
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();

		sent++;
	}
	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
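/*
 * Usage sketch (illustrative only; in this example the launch is driven by
 * main.c through the setup_data callbacks registered at the bottom of this
 * file). worker_lcore_id is a placeholder name:
 *
 *	static struct worker_data w;	// filled in by setup_eventdev_generic()
 *	rte_eal_remote_launch(worker_generic, &w, worker_lcore_id);
 */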
static int
worker_generic_burst(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();
	while (!fdata->done) {
		uint16_t i;

		if (fdata->cap.scheduler)
			fdata->cap.scheduler(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);
		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work();
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		/* Re-offer any events the first enqueue did not accept. */
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}
	if (!cdata.quiet)
		printf(" worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
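/*
 * worker_generic_burst() implements the same stage logic as worker_generic(),
 * but moves up to BATCH_SIZE events per dequeue/enqueue call, amortizing the
 * per-call eventdev overhead. The partial-enqueue retry loop above provides
 * the backpressure handling that the single-event variant gets from its
 * enqueue-until-accepted loop.
 */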
static int
setup_eventdev_generic(struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	const uint8_t nb_ports = cdata.num_workers;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	unsigned int i;
	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;
	/* Clamp the requested config to what the device supports. */
	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	/* Q creation - one load balanced per pipeline stage */
	printf(" Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
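/*
 * Worked example of the priority stepping above, assuming the default
 * RTE_EVENT_DEV_PRIORITY_LOWEST of 255 (numerically lower = higher
 * priority) and a 3-stage pipeline, so nb_queues = 4:
 *
 *	prio_delta = (255 - 1) / 4 = 63
 *	stage 0: 255 - 63*0 = 255	(lowest priority, furthest from TX)
 *	stage 1: 255 - 63*1 = 192
 *	stage 2: 255 - 63*2 = 129	(highest of the stages, closest to TX)
 */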
	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	cdata.tx_queue_id = i;
	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	return dev_id;
}
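/*
 * The sw eventdev does its scheduling inside a service function, so some
 * lcore has to run that service. A minimal sketch of how a scheduling core
 * can drive it by hand (this example does the equivalent through
 * fdata->cap.scheduler, see pipeline_common.h):
 *
 *	rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
 *
 * The runstate_mapped_check(..., 0) call above is what permits running the
 * service from an ordinary app lcore instead of a dedicated service core.
 */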
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t tx_port_id = 0;
	uint8_t evdev_id = 0;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	struct rte_event_port_conf adptr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	init_ports(nb_ports);
	/* Create one adapter for all the ethernet ports. */
	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	queue_conf.ev.queue_id = cdata.qid[0];
	for (i = 0; i < nb_ports; i++) {
		/* -1 adds all of the port's queues to the adapter. */
		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
				-1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}
	ret = rte_event_eth_tx_adapter_event_port_get(cdata.tx_adapter_id,
			&tx_port_id);
	if (ret)
		rte_exit(EXIT_FAILURE,
				"Failed to get Tx adapter port id");
	ret = rte_event_port_link(evdev_id, tx_port_id, &cdata.tx_queue_id,
			NULL, 1);
	if (ret != 1)
		rte_exit(EXIT_FAILURE,
				"Unable to link Tx adapter port to Tx queue");
	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
				&fdata->rxadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Rx adapter\n");
	}
	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
	ret = rte_event_eth_tx_adapter_service_id_get(cdata.tx_adapter_id,
				&fdata->txadptr_service_id);
	if (ret != -ESRCH && ret != 0) {
		rte_exit(EXIT_FAILURE,
			"Error getting the service ID for Tx adapter\n");
	}
	rte_service_runstate_set(fdata->txadptr_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->txadptr_service_id, 0);
	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
				cdata.rx_adapter_id);

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);

	if (rte_event_dev_start(evdev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");
}
static void
generic_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);
	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
			"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}
	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(cdata.tx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
			cdata.sched_lcore_mask,
			cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}
	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
}
void
set_worker_generic_setup_data(struct setup_data *caps, bool burst)
{
	if (burst)
		caps->worker = worker_generic_burst;
	else
		caps->worker = worker_generic;

	caps->adptr_setup = init_adapters;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_generic;
	caps->check_opt = generic_opt_check;
}
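/*
 * Usage sketch (illustrative; main.c in this example does the equivalent,
 * and use_burst_mode is a placeholder name):
 *
 *	struct setup_data caps;
 *	set_worker_generic_setup_data(&caps, use_burst_mode);
 *	// caps.check_opt(); caps.evdev_setup(...); caps.adptr_setup(...);
 *	// then launch caps.worker on each worker lcore.
 */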