/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

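/*
 * An invocation along these lines exercises this test (a sketch; option
 * names follow the guide above, verify them against your DPDK version):
 *
 *   sudo ./dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \
 *       --test=pipeline_queue --wlcores=1 --prod_type_ethdev --stlist=a
 */

/*
 * Total event queues used by this test: nb_stages worker queues per
 * available ethdev, plus one extra per-ethdev queue that feeds Tx.
 */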
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
	uint16_t eth_count = rte_eth_dev_count_avail();

	return (eth_count * opt->nb_stages) + eth_count;
}

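/*
 * Single-stage worker for devices with an internal Tx port: an event that
 * arrives already scheduled ATOMIC has reached the last queue and is
 * transmitted directly; otherwise it is forwarded to the Tx queue.
 */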
static __rte_noinline int
pipeline_queue_worker_single_stage_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			pipeline_event_enqueue(dev, port, &ev);
		}
	}

	return 0;
}

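/*
 * Single-stage worker without an internal Tx port: each event is
 * redirected to the Tx adapter's event queue for its mbuf port and
 * enqueued back so the Tx adapter performs the transmit.
 */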
static __rte_noinline int
pipeline_queue_worker_single_stage_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		ev.queue_id = tx_queue[ev.mbuf->port];
		rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
		w->processed_pkts++;
	}

	return 0;
}

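/*
 * Burst variant of the single-stage Tx worker: dequeues up to BURST_SIZE
 * events, transmits those already ATOMIC (marking them RTE_EVENT_OP_RELEASE),
 * forwards the rest, and enqueues the burst back in a single call.
 */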
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

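/*
 * Burst variant of the single-stage fwd worker: redirects every dequeued
 * event to its Tx adapter queue, then enqueues the burst back for transmit.
 */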
static __rte_noinline int
pipeline_queue_worker_single_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			ev[i].queue_id = tx_queue[ev[i].mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
			pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
		w->processed_pkts += nb_rx;
	}

	return 0;
}

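/*
 * Multi-stage worker with an internal Tx port: events that have reached
 * their port's Tx queue are transmitted; all others advance one queue,
 * using the stage's configured schedule type and ATOMIC for the last hop.
 */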
static __rte_noinline int
pipeline_queue_worker_multi_stage_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (ev.queue_id == tx_queue[ev.mbuf->port]) {
			pipeline_event_tx(dev, port, &ev);
			w->processed_pkts++;
			continue;
		}

		ev.queue_id++;
		pipeline_fwd_event(&ev, cq_id != last_queue ?
				sched_type_list[cq_id] :
				RTE_SCHED_TYPE_ATOMIC);
		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

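/*
 * Multi-stage worker without an internal Tx port: events leaving the last
 * worker stage are redirected to the Tx adapter queue; earlier stages
 * advance one queue with the stage's configured schedule type.
 */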
static __rte_noinline int
pipeline_queue_worker_multi_stage_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		cq_id = ev.queue_id % nb_stages;

		if (cq_id == last_queue) {
			ev.queue_id = tx_queue[ev.mbuf->port];
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			w->processed_pkts++;
		} else {
			ev.queue_id++;
			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
		}

		pipeline_event_enqueue(dev, port, &ev);
	}

	return 0;
}

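/* Burst variant of the multi-stage Tx worker. */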
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_tx(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
				pipeline_event_tx(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				w->processed_pkts++;
				continue;
			}

			ev[i].queue_id++;
			pipeline_fwd_event(&ev[i], cq_id != last_queue ?
					sched_type_list[cq_id] :
					RTE_SCHED_TYPE_ATOMIC);
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

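/* Burst variant of the multi-stage fwd worker. */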
static __rte_noinline int
pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
{
	PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
	const uint8_t *tx_queue = t->tx_evqueue_id;

	while (t->done == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			cq_id = ev[i].queue_id % nb_stages;

			if (cq_id == last_queue) {
				ev[i].queue_id = tx_queue[ev[i].mbuf->port];
				rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
				pipeline_fwd_event(&ev[i],
						RTE_SCHED_TYPE_ATOMIC);
				w->processed_pkts++;
			} else {
				ev[i].queue_id++;
				pipeline_fwd_event(&ev[i],
						sched_type_list[cq_id]);
			}
		}

		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
	}

	return 0;
}

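/*
 * Select the worker loop from the device's burst capability and from
 * whether it can transmit via an internal port (tx variants) or must go
 * through the Tx adapter (fwd variants).
 */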
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool internal_port = w->t->internal_port;
	const uint8_t nb_stages = opt->nb_stages;
	RTE_SET_USED(opt);

	if (nb_stages == 1) {
		if (!burst && internal_port)
			return pipeline_queue_worker_single_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_single_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_single_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_single_stage_burst_fwd(arg);
	} else {
		if (!burst && internal_port)
			return pipeline_queue_worker_multi_stage_tx(arg);
		else if (!burst && !internal_port)
			return pipeline_queue_worker_multi_stage_fwd(arg);
		else if (burst && internal_port)
			return pipeline_queue_worker_multi_stage_burst_tx(arg);
		else if (burst && !internal_port)
			return pipeline_queue_worker_multi_stage_burst_fwd(arg);
	}
	rte_panic("invalid worker\n");
}

static int
pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return pipeline_launch_lcores(test, opt, worker_wrapper);
}

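/*
 * Create the event queues and ports, attach the Rx/Tx adapters, link each
 * Tx adapter port to its single-link queue when no internal port is
 * available, and start the event and Ethernet devices.
 */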
static int
pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	int nb_ports;
	int nb_queues;
	int nb_stages = opt->nb_stages;
	uint8_t queue;
	uint8_t tx_evport_id = 0;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t nb_worker_queues = 0;
	uint16_t prod = 0;
	struct rte_event_dev_info info;
	struct test_pipeline *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_queues = rte_eth_dev_count_avail() * nb_stages;

	/* One queue for Tx adapter per port */
	nb_queues += rte_eth_dev_count_avail();

	memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
	memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);

	rte_event_dev_info_get(opt->dev_id, &info);
	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		uint8_t slot;

		q_conf.event_queue_cfg = 0;
		slot = queue % (nb_stages + 1);
		if (slot == nb_stages) {
			q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
			if (!t->internal_port) {
				q_conf.event_queue_cfg =
					RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
			}
			tx_evqueue_id[prod++] = queue;
		} else {
			q_conf.schedule_type = opt->sched_type_list[slot];
			queue_arr[nb_worker_queues] = queue;
			nb_worker_queues++;
		}

		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = info.max_event_port_dequeue_depth,
			.new_event_threshold = info.max_num_events,
	};

	if (!t->internal_port)
		ret = pipeline_event_port_setup(test, opt, queue_arr,
				nb_worker_queues, p_conf);
	else
		ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
				p_conf);

	if (ret)
		return ret;

	/*
	 * The pipelines are set up in the following manner:
	 *
	 * eth_dev_count = 2, nb_stages = 2.
	 *
	 * queues = 6
	 * stride = 3
	 *
	 * event queue pipelines:
	 * eth0 -> q0 -> q1 -> (q2->tx)
	 * eth1 -> q3 -> q4 -> (q5->tx)
	 *
	 * q2, q5 configured as ATOMIC | SINGLE_LINK
	 */
	ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
	if (ret)
		return ret;

	ret = pipeline_event_tx_adapter_setup(opt, p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	/* Connect the tx_evqueue_id to the Tx adapter port */
	if (!t->internal_port) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_event_eth_tx_adapter_event_port_get(prod,
					&tx_evport_id);
			if (ret) {
				evt_err("Unable to get Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return ret;
			}

			if (rte_event_port_link(opt->dev_id, tx_evport_id,
						&tx_evqueue_id[prod],
						NULL, 1) != 1) {
				evt_err("Unable to link Tx adptr[%d] evprt[%d]",
						prod, tx_evport_id);
				return -EINVAL;
			}
		}
	}


	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_eth_dev_start(prod);
		if (ret) {
			evt_err("Ethernet dev [%d] failed to start", prod);
			return ret;
		}
	}


	RTE_ETH_FOREACH_DEV(prod) {
		ret = rte_event_eth_rx_adapter_start(prod);
		if (ret) {
			evt_err("Rx adapter[%d] start failed", prod);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_start(prod);
		if (ret) {
			evt_err("Tx adapter[%d] start failed", prod);
			return ret;
		}
	}

	memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
			RTE_MAX_ETHPORTS);

	return 0;
}

static void
pipeline_queue_opt_dump(struct evt_options *opt)
{
	pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
}

static int
pipeline_queue_opt_check(struct evt_options *opt)
{
	return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
}

static bool
pipeline_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports <
			evt_nr_active_lcores(opt->wlcores)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			pipeline_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			evt_nr_active_lcores(opt->wlcores),
			dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops pipeline_queue = {
	.cap_check = pipeline_queue_capability_check,
	.opt_check = pipeline_queue_opt_check,
	.opt_dump = pipeline_queue_opt_dump,
	.test_setup = pipeline_test_setup,
	.mempool_setup = pipeline_mempool_setup,
	.ethdev_setup = pipeline_ethdev_setup,
	.eventdev_setup = pipeline_queue_eventdev_setup,
	.launch_lcores = pipeline_queue_launch_lcores,
	.eventdev_destroy = pipeline_eventdev_destroy,
	.mempool_destroy = pipeline_mempool_destroy,
	.ethdev_destroy = pipeline_ethdev_destroy,
	.test_result = pipeline_test_result,
	.test_destroy = pipeline_test_destroy,
};

EVT_TEST_REGISTER(pipeline_queue);