]>
Commit | Line | Data |
---|---|---|
f67539c2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>. | |
3 | * Copyright(c) 2016 Intel Corporation. | |
7c673cae FG |
4 | */ |
5 | #include <string.h> | |
6 | ||
7 | #include <rte_common.h> | |
8 | #include <rte_malloc.h> | |
9 | #include <rte_kni.h> | |
10 | ||
11 | #include "rte_port_kni.h" | |
12 | ||
13 | /* | |
14 | * Port KNI Reader | |
15 | */ | |
/*
 * Input-port statistics macros. They expand to real counter updates only
 * when RTE_PORT_STATS_COLLECT is defined; otherwise they expand to nothing
 * so the RX fast path carries no accounting overhead.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
29 | ||
/* Run-time state of a KNI reader (input) port. */
struct rte_port_kni_reader {
	struct rte_port_in_stats stats;	/* RX packet counters */

	struct rte_kni *kni;	/* underlying KNI device handle */
};
35 | ||
36 | static void * | |
37 | rte_port_kni_reader_create(void *params, int socket_id) | |
38 | { | |
39 | struct rte_port_kni_reader_params *conf = | |
11fdf7f2 | 40 | params; |
7c673cae FG |
41 | struct rte_port_kni_reader *port; |
42 | ||
43 | /* Check input parameters */ | |
44 | if (conf == NULL) { | |
45 | RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__); | |
46 | return NULL; | |
47 | } | |
48 | ||
49 | /* Memory allocation */ | |
50 | port = rte_zmalloc_socket("PORT", sizeof(*port), | |
51 | RTE_CACHE_LINE_SIZE, socket_id); | |
52 | if (port == NULL) { | |
53 | RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); | |
54 | return NULL; | |
55 | } | |
56 | ||
57 | /* Initialization */ | |
58 | port->kni = conf->kni; | |
59 | ||
60 | return port; | |
61 | } | |
62 | ||
63 | static int | |
64 | rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts) | |
65 | { | |
66 | struct rte_port_kni_reader *p = | |
11fdf7f2 | 67 | port; |
7c673cae FG |
68 | uint16_t rx_pkt_cnt; |
69 | ||
70 | rx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts); | |
71 | RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt); | |
72 | return rx_pkt_cnt; | |
73 | } | |
74 | ||
75 | static int | |
76 | rte_port_kni_reader_free(void *port) | |
77 | { | |
78 | if (port == NULL) { | |
79 | RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__); | |
80 | return -EINVAL; | |
81 | } | |
82 | ||
83 | rte_free(port); | |
84 | ||
85 | return 0; | |
86 | } | |
87 | ||
88 | static int rte_port_kni_reader_stats_read(void *port, | |
89 | struct rte_port_in_stats *stats, int clear) | |
90 | { | |
91 | struct rte_port_kni_reader *p = | |
11fdf7f2 | 92 | port; |
7c673cae FG |
93 | |
94 | if (stats != NULL) | |
95 | memcpy(stats, &p->stats, sizeof(p->stats)); | |
96 | ||
97 | if (clear) | |
98 | memset(&p->stats, 0, sizeof(p->stats)); | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
103 | /* | |
104 | * Port KNI Writer | |
105 | */ | |
/*
 * Output-port statistics macros for the (tail-drop) writer. Compiled to
 * real counter updates only when RTE_PORT_STATS_COLLECT is defined;
 * otherwise they are no-ops to keep the TX fast path lean.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
119 | ||
/* Run-time state of a KNI writer (output) port with tail drop. */
struct rte_port_kni_writer {
	struct rte_port_out_stats stats;	/* TX packet counters */

	/* Staging buffer; sized 2x the max burst, presumably so an
	 * almost-full buffer can absorb one more full bulk before it is
	 * flushed — TODO confirm against rte_port_kni_writer_tx_bulk(). */
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;	/* flush threshold; power of 2 (checked at create) */
	uint32_t tx_buf_count;	/* packets currently staged in tx_buf */
	uint64_t bsz_mask;	/* 1 << (tx_burst_sz - 1); bulk fast-path test */
	struct rte_kni *kni;	/* underlying KNI device handle */
};
129 | ||
130 | static void * | |
131 | rte_port_kni_writer_create(void *params, int socket_id) | |
132 | { | |
133 | struct rte_port_kni_writer_params *conf = | |
11fdf7f2 | 134 | params; |
7c673cae FG |
135 | struct rte_port_kni_writer *port; |
136 | ||
137 | /* Check input parameters */ | |
138 | if ((conf == NULL) || | |
139 | (conf->tx_burst_sz == 0) || | |
140 | (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) || | |
141 | (!rte_is_power_of_2(conf->tx_burst_sz))) { | |
142 | RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__); | |
143 | return NULL; | |
144 | } | |
145 | ||
146 | /* Memory allocation */ | |
147 | port = rte_zmalloc_socket("PORT", sizeof(*port), | |
148 | RTE_CACHE_LINE_SIZE, socket_id); | |
149 | if (port == NULL) { | |
150 | RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); | |
151 | return NULL; | |
152 | } | |
153 | ||
154 | /* Initialization */ | |
155 | port->kni = conf->kni; | |
156 | port->tx_burst_sz = conf->tx_burst_sz; | |
157 | port->tx_buf_count = 0; | |
158 | port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1); | |
159 | ||
160 | return port; | |
161 | } | |
162 | ||
163 | static inline void | |
164 | send_burst(struct rte_port_kni_writer *p) | |
165 | { | |
166 | uint32_t nb_tx; | |
167 | ||
168 | nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count); | |
169 | ||
170 | RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx); | |
171 | for (; nb_tx < p->tx_buf_count; nb_tx++) | |
172 | rte_pktmbuf_free(p->tx_buf[nb_tx]); | |
173 | ||
174 | p->tx_buf_count = 0; | |
175 | } | |
176 | ||
177 | static int | |
178 | rte_port_kni_writer_tx(void *port, struct rte_mbuf *pkt) | |
179 | { | |
180 | struct rte_port_kni_writer *p = | |
11fdf7f2 | 181 | port; |
7c673cae FG |
182 | |
183 | p->tx_buf[p->tx_buf_count++] = pkt; | |
184 | RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1); | |
185 | if (p->tx_buf_count >= p->tx_burst_sz) | |
186 | send_burst(p); | |
187 | ||
188 | return 0; | |
189 | } | |
190 | ||
/**
 * Enqueue a set of packets, selected by bitmask, on a KNI writer port.
 *
 * @param port Handle returned by rte_port_kni_writer_create().
 * @param pkts Packet array; bit i of pkts_mask selects pkts[i].
 * @param pkts_mask Bitmask of valid entries in pkts.
 * @return Always 0.
 */
static int
rte_port_kni_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_kni_writer *p = port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff pkts_mask is a dense run of low bits
	 * (mask & (mask + 1) clears such a run) AND that run reaches at
	 * least the configured burst size (bit tx_burst_sz-1 set).
	 * That combination is the full-burst fast path.
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		/* Preserve ordering: drain any staged packets first */
		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);

		/* Tail drop: free whatever the device did not accept */
		RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		/* Slow path: copy the selected packets into the staging
		 * buffer one by one, consuming mask bits low to high */
		for (; pkts_mask;) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}
237 | ||
238 | static int | |
239 | rte_port_kni_writer_flush(void *port) | |
240 | { | |
241 | struct rte_port_kni_writer *p = | |
11fdf7f2 | 242 | port; |
7c673cae FG |
243 | |
244 | if (p->tx_buf_count > 0) | |
245 | send_burst(p); | |
246 | ||
247 | return 0; | |
248 | } | |
249 | ||
250 | static int | |
251 | rte_port_kni_writer_free(void *port) | |
252 | { | |
253 | if (port == NULL) { | |
254 | RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__); | |
255 | return -EINVAL; | |
256 | } | |
257 | ||
258 | rte_port_kni_writer_flush(port); | |
259 | rte_free(port); | |
260 | ||
261 | return 0; | |
262 | } | |
263 | ||
264 | static int rte_port_kni_writer_stats_read(void *port, | |
265 | struct rte_port_out_stats *stats, int clear) | |
266 | { | |
267 | struct rte_port_kni_writer *p = | |
11fdf7f2 | 268 | port; |
7c673cae FG |
269 | |
270 | if (stats != NULL) | |
271 | memcpy(stats, &p->stats, sizeof(p->stats)); | |
272 | ||
273 | if (clear) | |
274 | memset(&p->stats, 0, sizeof(p->stats)); | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
279 | /* | |
280 | * Port KNI Writer Nodrop | |
281 | */ | |
/*
 * Output-port statistics macros for the nodrop writer (drops only after
 * retries are exhausted). Real counter updates only when
 * RTE_PORT_STATS_COLLECT is defined; no-ops otherwise.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif
295 | ||
/* Run-time state of a KNI writer (output) port that retries before dropping. */
struct rte_port_kni_writer_nodrop {
	struct rte_port_out_stats stats;	/* TX packet counters */

	/* Staging buffer; 2x max burst, same layout as rte_port_kni_writer */
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;	/* flush threshold; power of 2 (checked at create) */
	uint32_t tx_buf_count;	/* packets currently staged in tx_buf */
	uint64_t bsz_mask;	/* 1 << (tx_burst_sz - 1); bulk fast-path test */
	uint64_t n_retries;	/* TX retries before dropping; UINT64_MAX = "forever" */
	struct rte_kni *kni;	/* underlying KNI device handle */
};
306 | ||
307 | static void * | |
308 | rte_port_kni_writer_nodrop_create(void *params, int socket_id) | |
309 | { | |
310 | struct rte_port_kni_writer_nodrop_params *conf = | |
11fdf7f2 | 311 | params; |
7c673cae FG |
312 | struct rte_port_kni_writer_nodrop *port; |
313 | ||
314 | /* Check input parameters */ | |
315 | if ((conf == NULL) || | |
316 | (conf->tx_burst_sz == 0) || | |
317 | (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) || | |
318 | (!rte_is_power_of_2(conf->tx_burst_sz))) { | |
319 | RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__); | |
320 | return NULL; | |
321 | } | |
322 | ||
323 | /* Memory allocation */ | |
324 | port = rte_zmalloc_socket("PORT", sizeof(*port), | |
325 | RTE_CACHE_LINE_SIZE, socket_id); | |
326 | if (port == NULL) { | |
327 | RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); | |
328 | return NULL; | |
329 | } | |
330 | ||
331 | /* Initialization */ | |
332 | port->kni = conf->kni; | |
333 | port->tx_burst_sz = conf->tx_burst_sz; | |
334 | port->tx_buf_count = 0; | |
335 | port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1); | |
336 | ||
337 | /* | |
338 | * When n_retries is 0 it means that we should wait for every packet to | |
339 | * send no matter how many retries should it take. To limit number of | |
340 | * branches in fast path, we use UINT64_MAX instead of branching. | |
341 | */ | |
342 | port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries; | |
343 | ||
344 | return port; | |
345 | } | |
346 | ||
/*
 * Drain the staging buffer, retrying partial bursts up to p->n_retries
 * times. Only after all retries are exhausted are the remaining packets
 * freed and counted as drops.
 */
static inline void
send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)
{
	/* NOTE(review): i is 32-bit while n_retries is 64-bit; with
	 * n_retries == UINT64_MAX the loop condition never becomes false,
	 * which matches the "retry forever" contract — confirm intended. */
	uint32_t nb_tx = 0, i;

	nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);

	/* We sent all the packets in a first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		/* Resume the burst after the packets already accepted */
		nb_tx += rte_kni_tx_burst(p->kni,
			p->tx_buf + nb_tx,
			p->tx_buf_count - nb_tx);

		/* We sent all the packets in more than one try */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* We didn't send the packets in maximum allowed attempts */
	RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
379 | ||
380 | static int | |
381 | rte_port_kni_writer_nodrop_tx(void *port, struct rte_mbuf *pkt) | |
382 | { | |
383 | struct rte_port_kni_writer_nodrop *p = | |
11fdf7f2 | 384 | port; |
7c673cae FG |
385 | |
386 | p->tx_buf[p->tx_buf_count++] = pkt; | |
387 | RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1); | |
388 | if (p->tx_buf_count >= p->tx_burst_sz) | |
389 | send_burst_nodrop(p); | |
390 | ||
391 | return 0; | |
392 | } | |
393 | ||
/**
 * Enqueue a set of packets, selected by bitmask, on a KNI writer nodrop
 * port. Packets a direct burst could not send are re-staged and pushed
 * through send_burst_nodrop(), which retries before dropping.
 *
 * @param port Handle returned by rte_port_kni_writer_nodrop_create().
 * @param pkts Packet array; bit i of pkts_mask selects pkts[i].
 * @param pkts_mask Bitmask of valid entries in pkts.
 * @return Always 0.
 */
static int
rte_port_kni_writer_nodrop_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_kni_writer_nodrop *p = port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/*
	 * expr == 0 iff pkts_mask is a dense run of low bits covering at
	 * least the configured burst size — the full-burst fast path
	 * (same test as in rte_port_kni_writer_tx_bulk).
	 */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		/* Preserve ordering: drain any staged packets first */
		if (tx_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we didn't manage to send all packets in single burst, move
		 * remaining packets to the buffer and call send burst.
		 */
		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];
			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		send_burst_nodrop(p);
	} else {
		/* Slow path: stage the selected packets, consuming mask
		 * bits low to high */
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}
447 | ||
448 | static int | |
449 | rte_port_kni_writer_nodrop_flush(void *port) | |
450 | { | |
451 | struct rte_port_kni_writer_nodrop *p = | |
11fdf7f2 | 452 | port; |
7c673cae FG |
453 | |
454 | if (p->tx_buf_count > 0) | |
455 | send_burst_nodrop(p); | |
456 | ||
457 | return 0; | |
458 | } | |
459 | ||
460 | static int | |
461 | rte_port_kni_writer_nodrop_free(void *port) | |
462 | { | |
463 | if (port == NULL) { | |
464 | RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__); | |
465 | return -EINVAL; | |
466 | } | |
467 | ||
468 | rte_port_kni_writer_nodrop_flush(port); | |
469 | rte_free(port); | |
470 | ||
471 | return 0; | |
472 | } | |
473 | ||
474 | static int rte_port_kni_writer_nodrop_stats_read(void *port, | |
475 | struct rte_port_out_stats *stats, int clear) | |
476 | { | |
477 | struct rte_port_kni_writer_nodrop *p = | |
11fdf7f2 | 478 | port; |
7c673cae FG |
479 | |
480 | if (stats != NULL) | |
481 | memcpy(stats, &p->stats, sizeof(p->stats)); | |
482 | ||
483 | if (clear) | |
484 | memset(&p->stats, 0, sizeof(p->stats)); | |
485 | ||
486 | return 0; | |
487 | } | |
488 | ||
489 | ||
490 | /* | |
491 | * Summary of port operations | |
492 | */ | |
/* Input-port operations table: plugs the KNI reader into the
 * rte_port framework. */
struct rte_port_in_ops rte_port_kni_reader_ops = {
	.f_create = rte_port_kni_reader_create,
	.f_free = rte_port_kni_reader_free,
	.f_rx = rte_port_kni_reader_rx,
	.f_stats = rte_port_kni_reader_stats_read,
};
499 | ||
/* Output-port operations table for the tail-drop KNI writer. */
struct rte_port_out_ops rte_port_kni_writer_ops = {
	.f_create = rte_port_kni_writer_create,
	.f_free = rte_port_kni_writer_free,
	.f_tx = rte_port_kni_writer_tx,
	.f_tx_bulk = rte_port_kni_writer_tx_bulk,
	.f_flush = rte_port_kni_writer_flush,
	.f_stats = rte_port_kni_writer_stats_read,
};
508 | ||
/* Output-port operations table for the retry-before-drop KNI writer. */
struct rte_port_out_ops rte_port_kni_writer_nodrop_ops = {
	.f_create = rte_port_kni_writer_nodrop_create,
	.f_free = rte_port_kni_writer_nodrop_free,
	.f_tx = rte_port_kni_writer_nodrop_tx,
	.f_tx_bulk = rte_port_kni_writer_nodrop_tx_bulk,
	.f_flush = rte_port_kni_writer_nodrop_flush,
	.f_stats = rte_port_kni_writer_nodrop_stats_read,
};