/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
10 static __rte_always_inline
int
11 pipeline_atq_nb_event_queues(struct evt_options
*opt
)
15 return rte_eth_dev_count_avail();
19 pipeline_atq_worker_single_stage_tx(void *arg
)
21 PIPELINE_WROKER_SINGLE_STAGE_INIT
;
23 while (t
->done
== false) {
24 uint16_t event
= rte_event_dequeue_burst(dev
, port
, &ev
, 1, 0);
31 if (ev
.sched_type
== RTE_SCHED_TYPE_ATOMIC
) {
32 pipeline_tx_pkt(ev
.mbuf
);
36 pipeline_fwd_event(&ev
, RTE_SCHED_TYPE_ATOMIC
);
37 pipeline_event_enqueue(dev
, port
, &ev
);
44 pipeline_atq_worker_single_stage_fwd(void *arg
)
46 PIPELINE_WROKER_SINGLE_STAGE_INIT
;
47 const uint8_t tx_queue
= t
->tx_service
.queue_id
;
49 while (t
->done
== false) {
50 uint16_t event
= rte_event_dequeue_burst(dev
, port
, &ev
, 1, 0);
58 ev
.queue_id
= tx_queue
;
59 pipeline_fwd_event(&ev
, RTE_SCHED_TYPE_ATOMIC
);
60 pipeline_event_enqueue(dev
, port
, &ev
);
67 pipeline_atq_worker_single_stage_burst_tx(void *arg
)
69 PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT
;
71 while (t
->done
== false) {
72 uint16_t nb_rx
= rte_event_dequeue_burst(dev
, port
, ev
,
80 for (i
= 0; i
< nb_rx
; i
++) {
81 rte_prefetch0(ev
[i
+ 1].mbuf
);
82 if (ev
[i
].sched_type
== RTE_SCHED_TYPE_ATOMIC
) {
84 pipeline_tx_pkt(ev
[i
].mbuf
);
85 ev
[i
].op
= RTE_EVENT_OP_RELEASE
;
88 pipeline_fwd_event(&ev
[i
],
89 RTE_SCHED_TYPE_ATOMIC
);
92 pipeline_event_enqueue_burst(dev
, port
, ev
, nb_rx
);
99 pipeline_atq_worker_single_stage_burst_fwd(void *arg
)
101 PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT
;
102 const uint8_t tx_queue
= t
->tx_service
.queue_id
;
104 while (t
->done
== false) {
105 uint16_t nb_rx
= rte_event_dequeue_burst(dev
, port
, ev
,
113 for (i
= 0; i
< nb_rx
; i
++) {
114 rte_prefetch0(ev
[i
+ 1].mbuf
);
115 ev
[i
].queue_id
= tx_queue
;
116 pipeline_fwd_event(&ev
[i
], RTE_SCHED_TYPE_ATOMIC
);
120 pipeline_event_enqueue_burst(dev
, port
, ev
, nb_rx
);
127 pipeline_atq_worker_multi_stage_tx(void *arg
)
129 PIPELINE_WROKER_MULTI_STAGE_INIT
;
130 const uint8_t nb_stages
= t
->opt
->nb_stages
;
133 while (t
->done
== false) {
134 uint16_t event
= rte_event_dequeue_burst(dev
, port
, &ev
, 1, 0);
141 cq_id
= ev
.sub_event_type
% nb_stages
;
143 if (cq_id
== last_queue
) {
144 if (ev
.sched_type
== RTE_SCHED_TYPE_ATOMIC
) {
146 pipeline_tx_pkt(ev
.mbuf
);
150 pipeline_fwd_event(&ev
, RTE_SCHED_TYPE_ATOMIC
);
153 pipeline_fwd_event(&ev
, sched_type_list
[cq_id
]);
156 pipeline_event_enqueue(dev
, port
, &ev
);
162 pipeline_atq_worker_multi_stage_fwd(void *arg
)
164 PIPELINE_WROKER_MULTI_STAGE_INIT
;
165 const uint8_t nb_stages
= t
->opt
->nb_stages
;
166 const uint8_t tx_queue
= t
->tx_service
.queue_id
;
168 while (t
->done
== false) {
169 uint16_t event
= rte_event_dequeue_burst(dev
, port
, &ev
, 1, 0);
176 cq_id
= ev
.sub_event_type
% nb_stages
;
178 if (cq_id
== last_queue
) {
180 ev
.queue_id
= tx_queue
;
181 pipeline_fwd_event(&ev
, RTE_SCHED_TYPE_ATOMIC
);
184 pipeline_fwd_event(&ev
, sched_type_list
[cq_id
]);
187 pipeline_event_enqueue(dev
, port
, &ev
);
193 pipeline_atq_worker_multi_stage_burst_tx(void *arg
)
195 PIPELINE_WROKER_MULTI_STAGE_BURST_INIT
;
196 const uint8_t nb_stages
= t
->opt
->nb_stages
;
198 while (t
->done
== false) {
199 uint16_t nb_rx
= rte_event_dequeue_burst(dev
, port
, ev
,
207 for (i
= 0; i
< nb_rx
; i
++) {
208 rte_prefetch0(ev
[i
+ 1].mbuf
);
209 cq_id
= ev
[i
].sub_event_type
% nb_stages
;
211 if (cq_id
== last_queue
) {
212 if (ev
[i
].sched_type
== RTE_SCHED_TYPE_ATOMIC
) {
214 pipeline_tx_pkt(ev
[i
].mbuf
);
215 ev
[i
].op
= RTE_EVENT_OP_RELEASE
;
220 pipeline_fwd_event(&ev
[i
],
221 RTE_SCHED_TYPE_ATOMIC
);
223 ev
[i
].sub_event_type
++;
224 pipeline_fwd_event(&ev
[i
],
225 sched_type_list
[cq_id
]);
229 pipeline_event_enqueue_burst(dev
, port
, ev
, nb_rx
);
235 pipeline_atq_worker_multi_stage_burst_fwd(void *arg
)
237 PIPELINE_WROKER_MULTI_STAGE_BURST_INIT
;
238 const uint8_t nb_stages
= t
->opt
->nb_stages
;
239 const uint8_t tx_queue
= t
->tx_service
.queue_id
;
241 while (t
->done
== false) {
242 uint16_t nb_rx
= rte_event_dequeue_burst(dev
, port
, ev
,
250 for (i
= 0; i
< nb_rx
; i
++) {
251 rte_prefetch0(ev
[i
+ 1].mbuf
);
252 cq_id
= ev
[i
].sub_event_type
% nb_stages
;
254 if (cq_id
== last_queue
) {
256 ev
[i
].queue_id
= tx_queue
;
257 pipeline_fwd_event(&ev
[i
],
258 RTE_SCHED_TYPE_ATOMIC
);
260 ev
[i
].sub_event_type
++;
261 pipeline_fwd_event(&ev
[i
],
262 sched_type_list
[cq_id
]);
266 pipeline_event_enqueue_burst(dev
, port
, ev
, nb_rx
);
272 worker_wrapper(void *arg
)
274 struct worker_data
*w
= arg
;
275 struct evt_options
*opt
= w
->t
->opt
;
276 const bool burst
= evt_has_burst_mode(w
->dev_id
);
277 const bool mt_safe
= !w
->t
->mt_unsafe
;
278 const uint8_t nb_stages
= opt
->nb_stages
;
281 if (nb_stages
== 1) {
282 if (!burst
&& mt_safe
)
283 return pipeline_atq_worker_single_stage_tx(arg
);
284 else if (!burst
&& !mt_safe
)
285 return pipeline_atq_worker_single_stage_fwd(arg
);
286 else if (burst
&& mt_safe
)
287 return pipeline_atq_worker_single_stage_burst_tx(arg
);
288 else if (burst
&& !mt_safe
)
289 return pipeline_atq_worker_single_stage_burst_fwd(arg
);
291 if (!burst
&& mt_safe
)
292 return pipeline_atq_worker_multi_stage_tx(arg
);
293 else if (!burst
&& !mt_safe
)
294 return pipeline_atq_worker_multi_stage_fwd(arg
);
295 if (burst
&& mt_safe
)
296 return pipeline_atq_worker_multi_stage_burst_tx(arg
);
297 else if (burst
&& !mt_safe
)
298 return pipeline_atq_worker_multi_stage_burst_fwd(arg
);
300 rte_panic("invalid worker\n");
304 pipeline_atq_launch_lcores(struct evt_test
*test
, struct evt_options
*opt
)
306 struct test_pipeline
*t
= evt_test_priv(test
);
309 rte_service_component_runstate_set(t
->tx_service
.service_id
, 1);
310 return pipeline_launch_lcores(test
, opt
, worker_wrapper
);
314 pipeline_atq_eventdev_setup(struct evt_test
*test
, struct evt_options
*opt
)
320 struct rte_event_dev_info info
;
321 struct test_pipeline
*t
= evt_test_priv(test
);
322 uint8_t tx_evqueue_id
= 0;
323 uint8_t queue_arr
[RTE_EVENT_MAX_QUEUES_PER_DEV
];
324 uint8_t nb_worker_queues
= 0;
326 nb_ports
= evt_nr_active_lcores(opt
->wlcores
);
327 nb_queues
= rte_eth_dev_count_avail();
329 /* One extra port and queueu for Tx service */
331 tx_evqueue_id
= nb_queues
;
337 rte_event_dev_info_get(opt
->dev_id
, &info
);
339 const struct rte_event_dev_config config
= {
340 .nb_event_queues
= nb_queues
,
341 .nb_event_ports
= nb_ports
,
342 .nb_events_limit
= info
.max_num_events
,
343 .nb_event_queue_flows
= opt
->nb_flows
,
344 .nb_event_port_dequeue_depth
=
345 info
.max_event_port_dequeue_depth
,
346 .nb_event_port_enqueue_depth
=
347 info
.max_event_port_enqueue_depth
,
349 ret
= rte_event_dev_configure(opt
->dev_id
, &config
);
351 evt_err("failed to configure eventdev %d", opt
->dev_id
);
355 struct rte_event_queue_conf q_conf
= {
356 .priority
= RTE_EVENT_DEV_PRIORITY_NORMAL
,
357 .nb_atomic_flows
= opt
->nb_flows
,
358 .nb_atomic_order_sequences
= opt
->nb_flows
,
360 /* queue configurations */
361 for (queue
= 0; queue
< nb_queues
; queue
++) {
362 q_conf
.event_queue_cfg
= RTE_EVENT_QUEUE_CFG_ALL_TYPES
;
365 if (queue
== tx_evqueue_id
) {
366 q_conf
.event_queue_cfg
=
367 RTE_EVENT_QUEUE_CFG_SINGLE_LINK
;
369 queue_arr
[nb_worker_queues
] = queue
;
374 ret
= rte_event_queue_setup(opt
->dev_id
, queue
, &q_conf
);
376 evt_err("failed to setup queue=%d", queue
);
381 if (opt
->wkr_deq_dep
> info
.max_event_port_dequeue_depth
)
382 opt
->wkr_deq_dep
= info
.max_event_port_dequeue_depth
;
384 /* port configuration */
385 const struct rte_event_port_conf p_conf
= {
386 .dequeue_depth
= opt
->wkr_deq_dep
,
387 .enqueue_depth
= info
.max_event_port_dequeue_depth
,
388 .new_event_threshold
= info
.max_num_events
,
392 ret
= pipeline_event_port_setup(test
, opt
, queue_arr
,
393 nb_worker_queues
, p_conf
);
397 ret
= pipeline_event_tx_service_setup(test
, opt
, tx_evqueue_id
,
398 nb_ports
- 1, p_conf
);
400 ret
= pipeline_event_port_setup(test
, opt
, NULL
, nb_queues
,
407 * The pipelines are setup in the following manner:
409 * eth_dev_count = 2, nb_stages = 2, atq mode
411 * Multi thread safe :
415 * event queue pipelines:
419 * q0, q1 are configured as ATQ so, all the different stages can
420 * be enqueued on the same queue.
422 * Multi thread unsafe :
426 * event queue pipelines:
428 * } (q3->tx) Tx service
431 * q0,q1 are configured as stated above.
432 * q3 configured as SINGLE_LINK|ATOMIC.
434 ret
= pipeline_event_rx_adapter_setup(opt
, 1, p_conf
);
438 if (!evt_has_distributed_sched(opt
->dev_id
)) {
440 rte_event_dev_service_id_get(opt
->dev_id
, &service_id
);
441 ret
= evt_service_setup(service_id
);
443 evt_err("No service lcore found to run event dev.");
448 ret
= rte_event_dev_start(opt
->dev_id
);
450 evt_err("failed to start eventdev %d", opt
->dev_id
);
/* Dump the test options along with the computed event-queue count. */
static void
pipeline_atq_opt_dump(struct evt_options *opt)
{
	pipeline_opt_dump(opt, pipeline_atq_nb_event_queues(opt));
}
/* Validate the test options against the common pipeline constraints. */
static int
pipeline_atq_opt_check(struct evt_options *opt)
{
	return pipeline_opt_check(opt, pipeline_atq_nb_event_queues(opt));
}
470 pipeline_atq_capability_check(struct evt_options
*opt
)
472 struct rte_event_dev_info dev_info
;
474 rte_event_dev_info_get(opt
->dev_id
, &dev_info
);
475 if (dev_info
.max_event_queues
< pipeline_atq_nb_event_queues(opt
) ||
476 dev_info
.max_event_ports
<
477 evt_nr_active_lcores(opt
->wlcores
)) {
478 evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
479 pipeline_atq_nb_event_queues(opt
),
480 dev_info
.max_event_queues
,
481 evt_nr_active_lcores(opt
->wlcores
),
482 dev_info
.max_event_ports
);
488 static const struct evt_test_ops pipeline_atq
= {
489 .cap_check
= pipeline_atq_capability_check
,
490 .opt_check
= pipeline_atq_opt_check
,
491 .opt_dump
= pipeline_atq_opt_dump
,
492 .test_setup
= pipeline_test_setup
,
493 .mempool_setup
= pipeline_mempool_setup
,
494 .ethdev_setup
= pipeline_ethdev_setup
,
495 .eventdev_setup
= pipeline_atq_eventdev_setup
,
496 .launch_lcores
= pipeline_atq_launch_lcores
,
497 .eventdev_destroy
= pipeline_eventdev_destroy
,
498 .mempool_destroy
= pipeline_mempool_destroy
,
499 .ethdev_destroy
= pipeline_ethdev_destroy
,
500 .test_result
= pipeline_test_result
,
501 .test_destroy
= pipeline_test_destroy
,
504 EVT_TEST_REGISTER(pipeline_atq
);