/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include "test_table_ports.h"
#include "test_table.h"
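
/*
 * List of port unit tests exported to the table test harness. The runner in
 * test_table.c is expected to walk port_tests[] (n_port_tests entries) and
 * treat a negative return value from any entry as a failure.
 */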
port_test port_tests[] = {
        test_port_ring_reader,
        test_port_ring_writer,
};

unsigned n_port_tests = RTE_DIM(port_tests);

/* Port tests */
int
test_port_ring_reader(void)
{
        int status, i;
        struct rte_port_ring_reader_params port_ring_reader_params;
        void *port;

        /* Invalid params */
        port = rte_port_ring_reader_ops.f_create(NULL, 0);
        if (port != NULL)
                return -1;

        status = rte_port_ring_reader_ops.f_free(port);
        if (status >= 0)
                return -2;

        /* Create and free */
        port_ring_reader_params.ring = RING_RX;
        port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);
        if (port == NULL)
                return -3;

        status = rte_port_ring_reader_ops.f_free(port);
        if (status != 0)
                return -4;

        /* -- Traffic RX -- */
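        /*
         * Enqueue mbufs straight onto RING_RX with the ring API, then read
         * them back through the reader port's f_rx() and compare the counts.
         */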
        int expected_pkts, received_pkts;
        struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
        void *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

        port_ring_reader_params.ring = RING_RX;
        port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);

        /* Single packet */
        mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

        expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
                mbuf, 1, NULL);
        received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

        if (received_pkts < expected_pkts)
                return -5;

        rte_pktmbuf_free(res_mbuf[0]);

        /* Multiple packets */
        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                mbuf[i] = rte_pktmbuf_alloc(pool);

        expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
                (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
        received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
                RTE_PORT_IN_BURST_SIZE_MAX);

        if (received_pkts < expected_pkts)
                return -6;

        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                rte_pktmbuf_free(res_mbuf[i]);

        return 0;
}

int
test_port_ring_writer(void)
{
        int status, i;
        struct rte_port_ring_writer_params port_ring_writer_params;
        void *port;

        /* Invalid params */
        port = rte_port_ring_writer_ops.f_create(NULL, 0);
        if (port != NULL)
                return -1;

        status = rte_port_ring_writer_ops.f_free(port);
        if (status >= 0)
                return -2;

        port_ring_writer_params.ring = NULL;

        port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
        if (port != NULL)
                return -3;

        port_ring_writer_params.ring = RING_TX;
        port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX + 1;

        port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
        if (port != NULL)
                return -4;

        /* Create and free */
        port_ring_writer_params.ring = RING_TX;
        port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;

        port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
        if (port == NULL)
                return -5;

        status = rte_port_ring_writer_ops.f_free(port);
        if (status != 0)
                return -6;

        /* -- Traffic TX -- */
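        /*
         * Push mbufs through the writer port with f_tx()/f_tx_bulk(), flush
         * where needed, then dequeue directly from RING_TX to verify that
         * every packet was forwarded.
         */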
        int expected_pkts, received_pkts;
        struct rte_mbuf *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
        struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

        port_ring_writer_params.ring = RING_TX;
        port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;
        port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);

        /* Single packet */
        mbuf[0] = rte_pktmbuf_alloc(pool);

        rte_port_ring_writer_ops.f_tx(port, mbuf[0]);
        rte_port_ring_writer_ops.f_flush(port);
        expected_pkts = 1;
        received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
                (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

        if (received_pkts < expected_pkts)
                return -7;

        rte_pktmbuf_free(res_mbuf[0]);

        /* Multiple packets */
        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
                mbuf[i] = rte_pktmbuf_alloc(pool);
                rte_port_ring_writer_ops.f_tx(port, mbuf[i]);
        }

        expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
        received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
                (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

        if (received_pkts < expected_pkts)
                return -8;

        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                rte_pktmbuf_free(res_mbuf[i]);

        /* TX Bulk */
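        /*
         * f_tx_bulk() takes a 64-bit mask of valid slots in mbuf[]:
         * (uint64_t)-1 marks all RTE_PORT_IN_BURST_SIZE_MAX packets valid,
         * while the (uint64_t)-3 / (uint64_t)2 pair below splits the same
         * burst into "everything except slot 1" and "slot 1 only".
         */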
        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                mbuf[i] = rte_pktmbuf_alloc(pool);
        rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-1);

        expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
        received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
                (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

        if (received_pkts < expected_pkts)
                return -8;

        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                rte_pktmbuf_free(res_mbuf[i]);

        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                mbuf[i] = rte_pktmbuf_alloc(pool);
        rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-3);
        rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)2);

        expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
        received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
                (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

        if (received_pkts < expected_pkts)
                return -9;

        for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
                rte_pktmbuf_free(res_mbuf[i]);

        return 0;
}