/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

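/*
 * Stage 0: an event arriving from the ordered queue (q0) is forwarded to
 * the atomic queue (q1), where its per-flow sequence is verified.
 */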
static inline __attribute__((always_inline)) void
order_queue_process_stage_0(struct rte_event *const ev)
{
        ev->queue_id = 1; /* q1 atomic queue */
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
        ev->event_type = RTE_EVENT_TYPE_CPU;
}

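/*
 * Non-burst worker: dequeues one event at a time. Events from q0 are
 * forwarded to q1; events from q1 go through the stage 1 order check.
 * The loop exits on error or once all outstanding packets are consumed.
 */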
static int
order_queue_worker(void *arg)
{
        ORDER_WORKER_INIT;
        struct rte_event ev;

        while (t->err == false) {
                uint16_t event = rte_event_dequeue_burst(dev_id, port,
                                        &ev, 1, 0);
                if (!event) {
                        if (rte_atomic64_read(outstand_pkts) <= 0)
                                break;
                        rte_pause();
                        continue;
                }

                if (ev.queue_id == 0) { /* from ordered queue */
                        order_queue_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
                                        != 1)
                                rte_pause();
                } else if (ev.queue_id == 1) { /* from atomic queue */
                        order_process_stage_1(t, &ev, nb_flows,
                                        expected_flow_seq, outstand_pkts);
                } else {
                        order_process_stage_invalid(t, &ev);
                }
        }
        return 0;
}

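/*
 * Burst worker: same two-stage pipeline, but moves up to BURST_SIZE
 * events per dequeue/enqueue call.
 */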
static int
order_queue_worker_burst(void *arg)
{
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
        uint16_t i;

        while (t->err == false) {
                uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
                                BURST_SIZE, 0);

                if (nb_rx == 0) {
                        if (rte_atomic64_read(outstand_pkts) <= 0)
                                break;
                        rte_pause();
                        continue;
                }

                for (i = 0; i < nb_rx; i++) {
                        if (ev[i].queue_id == 0) { /* from ordered queue */
                                order_queue_process_stage_0(&ev[i]);
                        } else if (ev[i].queue_id == 1) { /* from atomic queue */
                                order_process_stage_1(t, &ev[i], nb_flows,
                                        expected_flow_seq, outstand_pkts);
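                                /* stage 1 is terminal: release the event's
                                 * atomic context instead of forwarding it
                                 */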
                                ev[i].op = RTE_EVENT_OP_RELEASE;
                        } else {
                                order_process_stage_invalid(t, &ev[i]);
                        }
                }

                uint16_t enq;

                enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
                while (enq < nb_rx) {
                        enq += rte_event_enqueue_burst(dev_id, port,
                                        ev + enq, nb_rx - enq);
                }
        }
        return 0;
}

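/* Dispatch to the burst or non-burst worker based on device capability. */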
static int
worker_wrapper(void *arg)
{
        struct worker_data *w = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);

        if (burst)
                return order_queue_worker_burst(arg);
        else
                return order_queue_worker(arg);
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        return order_launch_lcores(test, opt, worker_wrapper);
}

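/*
 * Eventdev setup: a two-queue pipeline with one port per worker plus one
 * for the producer. The producer feeds q0 (ordered); workers forward
 * events to q1 (atomic), where flow ordering is checked.
 */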
#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        int ret;

        const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
        /* number of active worker cores + 1 producer */
        const uint8_t nb_ports = nb_workers + 1;

        const struct rte_event_dev_config config = {
                        .nb_event_queues = NB_QUEUES, /* q0 ordered, q1 atomic */
                        .nb_event_ports = nb_ports,
                        .nb_events_limit = 4096,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth = 128,
                        .nb_event_port_enqueue_depth = 128,
        };

        ret = rte_event_dev_configure(opt->dev_id, &config);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        /* q0 (ordered queue) configuration */
        struct rte_event_queue_conf q0_ordered_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .schedule_type = RTE_SCHED_TYPE_ORDERED,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
        if (ret) {
                evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
                return ret;
        }

        /* q1 (atomic queue) configuration */
        struct rte_event_queue_conf q1_atomic_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .schedule_type = RTE_SCHED_TYPE_ATOMIC,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
        if (ret) {
                evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
                return ret;
        }

        /* setup one port per worker, linking to all queues */
        ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
        if (ret)
                return ret;

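        /*
         * Event devices without distributed (per-port) scheduling need a
         * dedicated service core to run the scheduler.
         */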
        if (!evt_has_distributed_sched(opt->dev_id)) {
                uint32_t service_id;
                rte_event_dev_service_id_get(opt->dev_id, &service_id);
                ret = evt_service_setup(service_id);
                if (ret) {
                        evt_err("No service lcore found to run event dev.");
                        return ret;
                }
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
        order_opt_dump(opt);
        evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

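/* Check that the device exposes enough queues and ports for this test. */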
static bool
order_queue_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
                        order_nb_event_ports(opt)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        NB_QUEUES, dev_info.max_event_queues,
                        order_nb_event_ports(opt), dev_info.max_event_ports);
                return false;
        }

        return true;
}

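/* Test callbacks registered with the test-eventdev framework. */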
static const struct evt_test_ops order_queue = {
        .cap_check = order_queue_capability_check,
        .opt_check = order_opt_check,
        .opt_dump = order_queue_opt_dump,
        .test_setup = order_test_setup,
        .mempool_setup = order_mempool_setup,
        .eventdev_setup = order_queue_eventdev_setup,
        .launch_lcores = order_queue_launch_lcores,
        .eventdev_destroy = order_eventdev_destroy,
        .mempool_destroy = order_mempool_destroy,
        .test_result = order_test_result,
        .test_destroy = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);