/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"

#define ETHDEV_FWVERS_LEN 32

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
    enum tx_pkt_split split;
    const char *name;
} tx_split_name[] = {
    {
        .split = TX_PKT_SPLIT_OFF,
        .name = "off",
    },
    {
        .split = TX_PKT_SPLIT_ON,
        .name = "on",
    },
    {
        .split = TX_PKT_SPLIT_RND,
        .name = "rand",
    },
};

const struct rss_type_info rss_type_table[] = {
    { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
        ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
        ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP},
    { "none", 0 },
    { "eth", ETH_RSS_ETH },
    { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
    { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
    { "vlan", ETH_RSS_VLAN },
    { "s-vlan", ETH_RSS_S_VLAN },
    { "c-vlan", ETH_RSS_C_VLAN },
    { "ipv4", ETH_RSS_IPV4 },
    { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
    { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
    { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
    { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
    { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
    { "ipv6", ETH_RSS_IPV6 },
    { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
    { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
    { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
    { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
    { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
    { "l2-payload", ETH_RSS_L2_PAYLOAD },
    { "ipv6-ex", ETH_RSS_IPV6_EX },
    { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
    { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
    { "port", ETH_RSS_PORT },
    { "vxlan", ETH_RSS_VXLAN },
    { "geneve", ETH_RSS_GENEVE },
    { "nvgre", ETH_RSS_NVGRE },
    { "ip", ETH_RSS_IP },
    { "udp", ETH_RSS_UDP },
    { "tcp", ETH_RSS_TCP },
    { "sctp", ETH_RSS_SCTP },
    { "tunnel", ETH_RSS_TUNNEL },
    { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
    { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
    { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
    { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
    { "esp", ETH_RSS_ESP },
    { "ah", ETH_RSS_AH },
    { "l2tpv3", ETH_RSS_L2TPV3 },
    { "pfcp", ETH_RSS_PFCP },
    { NULL, 0 },
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
    char buf[RTE_ETHER_ADDR_FMT_SIZE];
    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
    printf("%s%s", name, buf);
}

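/*
 * Show the basic statistics of a port (packet, byte and error counters)
 * and derive Rx/Tx throughput since the previous call from the TSC delta
 * kept in the static prev_* arrays, one slot per port.
 */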
void
nic_stats_display(portid_t port_id)
{
    static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
    static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
    static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
    uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
        diff_cycles;
    uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
    struct rte_eth_stats stats;
    struct rte_port *port = &ports[port_id];
    uint8_t i;

    static const char *nic_stats_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    rte_eth_stats_get(port_id, &stats);
    printf("\n %s NIC statistics for port %-2d %s\n",
           nic_stats_border, port_id, nic_stats_border);

    if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
        printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
               "%-"PRIu64"\n",
               stats.ipackets, stats.imissed, stats.ibytes);
        printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %-10"PRIu64"\n",
               stats.rx_nombuf);
        printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
               "%-"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);
    }
    else {
        printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
               " RX-bytes: %10"PRIu64"\n",
               stats.ipackets, stats.ierrors, stats.ibytes);
        printf(" RX-errors: %10"PRIu64"\n", stats.ierrors);
        printf(" RX-nombuf: %10"PRIu64"\n",
               stats.rx_nombuf);
        printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
               " TX-bytes: %10"PRIu64"\n",
               stats.opackets, stats.oerrors, stats.obytes);
    }

    if (port->rx_queue_stats_mapping_enabled) {
        printf("\n");
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d RX-packets: %10"PRIu64
                   " RX-errors: %10"PRIu64
                   " RX-bytes: %10"PRIu64"\n",
                   i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
        }
    }
    if (port->tx_queue_stats_mapping_enabled) {
        printf("\n");
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            printf(" Stats reg %2d TX-packets: %10"PRIu64
                   " TX-bytes: %10"PRIu64"\n",
                   i, stats.q_opackets[i], stats.q_obytes[i]);
        }
    }

    diff_cycles = prev_cycles[port_id];
    prev_cycles[port_id] = rte_rdtsc();
    if (diff_cycles > 0)
        diff_cycles = prev_cycles[port_id] - diff_cycles;

    diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
        (stats.ipackets - prev_pkts_rx[port_id]) : 0;
    diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
        (stats.opackets - prev_pkts_tx[port_id]) : 0;
    prev_pkts_rx[port_id] = stats.ipackets;
    prev_pkts_tx[port_id] = stats.opackets;
    mpps_rx = diff_cycles > 0 ?
        diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
    mpps_tx = diff_cycles > 0 ?
        diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;

    diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
        (stats.ibytes - prev_bytes_rx[port_id]) : 0;
    diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
        (stats.obytes - prev_bytes_tx[port_id]) : 0;
    prev_bytes_rx[port_id] = stats.ibytes;
    prev_bytes_tx[port_id] = stats.obytes;
    mbps_rx = diff_cycles > 0 ?
        diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0;
    mbps_tx = diff_cycles > 0 ?
        diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0;

    printf("\n Throughput (since last show)\n");
    printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12"
           PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
           mpps_tx, mbps_tx * 8);

    printf(" %s############################%s\n",
           nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = rte_eth_stats_reset(port_id);
    if (ret != 0) {
        /* ethdev calls return a negative errno value on failure */
        printf("%s: Error: failed to reset stats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }

    ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
    if (ret != 0) {
        printf("%s: Error: failed to get stats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }
    printf("\n NIC statistics for port %d cleared\n", port_id);
}

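/*
 * Show the extended statistics of a port. The xstats API is used in the
 * usual two steps: a first call with a NULL buffer returns the number of
 * entries, then names and values are fetched into buffers of that size.
 */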
void
nic_xstats_display(portid_t port_id)
{
    struct rte_eth_xstat *xstats;
    int cnt_xstats, idx_xstat;
    struct rte_eth_xstat_name *xstats_names;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    printf("###### NIC extended statistics for port %-2d\n", port_id);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        printf("Error: Invalid port number %i\n", port_id);
        return;
    }

    /* Get count */
    cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (cnt_xstats < 0) {
        printf("Error: Cannot get count of xstats\n");
        return;
    }

    /* Get id-name lookup table */
    xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
    if (xstats_names == NULL) {
        printf("Cannot allocate memory for xstats lookup\n");
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get_names(
            port_id, xstats_names, cnt_xstats)) {
        printf("Error: Cannot get xstats lookup\n");
        free(xstats_names);
        return;
    }

    /* Get stats themselves */
    xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
    if (xstats == NULL) {
        printf("Cannot allocate memory for xstats\n");
        free(xstats_names);
        return;
    }
    if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
        printf("Error: Unable to get xstats\n");
        free(xstats_names);
        free(xstats);
        return;
    }

    /* Display xstats */
    for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
        if (xstats_hide_zero && !xstats[idx_xstat].value)
            continue;
        printf("%s: %"PRIu64"\n",
               xstats_names[idx_xstat].name,
               xstats[idx_xstat].value);
    }
    free(xstats_names);
    free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = rte_eth_xstats_reset(port_id);
    if (ret != 0) {
        /* ethdev calls return a negative errno value on failure */
        printf("%s: Error: failed to reset xstats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }

    ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
    if (ret != 0) {
        printf("%s: Error: failed to get stats (port %u): %s",
               __func__, port_id, strerror(-ret));
        return;
    }
}

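/*
 * Show which Rx/Tx queues of a port are mapped to which of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics registers.
 */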
void
nic_stats_mapping_display(portid_t port_id)
{
    struct rte_port *port = &ports[port_id];
    uint16_t i;

    static const char *nic_stats_mapping_border = "########################";

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
        printf("Port id %d - either does not support queue statistic mapping or"
               " no queue statistic mapping set\n", port_id);
        return;
    }

    printf("\n %s NIC statistics mapping for port %-2d %s\n",
           nic_stats_mapping_border, port_id, nic_stats_mapping_border);

    if (port->rx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
            if (rx_queue_stats_mappings[i].port_id == port_id) {
                printf(" RX-queue %2d mapped to Stats Reg %2d\n",
                       rx_queue_stats_mappings[i].queue_id,
                       rx_queue_stats_mappings[i].stats_counter_id);
            }
        }
        printf("\n");
    }

    if (port->tx_queue_stats_mapping_enabled) {
        for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
            if (tx_queue_stats_mappings[i].port_id == port_id) {
                printf(" TX-queue %2d mapped to Stats Reg %2d\n",
                       tx_queue_stats_mappings[i].queue_id,
                       tx_queue_stats_mappings[i].stats_counter_id);
            }
        }
    }

    printf(" %s####################################%s\n",
           nic_stats_mapping_border, nic_stats_mapping_border);
}

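/*
 * Show the configuration of one Rx queue (mempool, thresholds, descriptor
 * count, burst mode) as reported by rte_eth_rx_queue_info_get(); the Tx
 * variant below does the same for a Tx queue.
 */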
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
    struct rte_eth_burst_mode mode;
    struct rte_eth_rxq_info qinfo;
    int32_t rc;
    static const char *info_border = "*********************";

    rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
    if (rc != 0) {
        printf("Failed to retrieve information for port: %u, "
               "RX queue: %hu\nerror desc: %s(%d)\n",
               port_id, queue_id, strerror(-rc), rc);
        return;
    }

    printf("\n%s Infos for port %-2u, RX queue %-2u %s",
           info_border, port_id, queue_id, info_border);

    printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
    printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
    printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
    printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
    printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
    printf("\nRX drop packets: %s",
           (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
    printf("\nRX deferred start: %s",
           (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
    printf("\nRX scattered packets: %s",
           (qinfo.scattered_rx != 0) ? "on" : "off");
    printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

    if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
        printf("\nBurst mode: %s%s",
               mode.info,
               mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
                   " (per queue)" : "");

    printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
    struct rte_eth_burst_mode mode;
    struct rte_eth_txq_info qinfo;
    int32_t rc;
    static const char *info_border = "*********************";

    rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
    if (rc != 0) {
        printf("Failed to retrieve information for port: %u, "
               "TX queue: %hu\nerror desc: %s(%d)\n",
               port_id, queue_id, strerror(-rc), rc);
        return;
    }

    printf("\n%s Infos for port %-2u, TX queue %-2u %s",
           info_border, port_id, queue_id, info_border);

    printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
    printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
    printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
    printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
    printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
    printf("\nTX deferred start: %s",
           (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
    printf("\nNumber of TXDs: %hu", qinfo.nb_desc);

    if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
        printf("\nBurst mode: %s%s",
               mode.info,
               mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
                   " (per queue)" : "");

    printf("\n");
}

static int bus_match_all(const struct rte_bus *bus, const void *data)
{
    RTE_SET_USED(bus);
    RTE_SET_USED(data);
    return 0;
}

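/*
 * Walk every bus (bus_match_all accepts them all), iterate the devices it
 * knows about and, for those matching the optional devargs identifier,
 * print bus/driver information plus the ethdev ports backed by the device.
 */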
void
device_infos_display(const char *identifier)
{
    static const char *info_border = "*********************";
    struct rte_bus *start = NULL, *next;
    struct rte_dev_iterator dev_iter;
    char name[RTE_ETH_NAME_MAX_LEN];
    struct rte_ether_addr mac_addr;
    struct rte_device *dev;
    struct rte_devargs da;
    portid_t port_id;
    char devstr[128];

    memset(&da, 0, sizeof(da));
    if (!identifier)
        goto skip_parse;

    if (rte_devargs_parsef(&da, "%s", identifier)) {
        printf("cannot parse identifier\n");
        if (da.args)
            free(da.args);
        return;
    }

skip_parse:
    while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

        start = next;
        if (identifier && da.bus != next)
            continue;

        /* Skip buses that don't have iterate method */
        if (!next->dev_iterate)
            continue;

        snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
        RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

            if (!dev->driver)
                continue;
            /* Check for matching device if identifier is present */
            if (identifier &&
                strncmp(da.name, dev->name, strlen(dev->name)))
                continue;
            printf("\n%s Infos for device %s %s\n",
                   info_border, dev->name, info_border);
            printf("Bus name: %s", dev->bus->name);
            printf("\nDriver name: %s", dev->driver->name);
            printf("\nDevargs: %s",
                   dev->devargs ? dev->devargs->args : "");
            printf("\nConnect to socket: %d", dev->numa_node);
            printf("\n");

            /* List ports with matching device name */
            RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
                printf("\n\tPort id: %-2d", port_id);
                if (eth_macaddr_get_print_err(port_id,
                                              &mac_addr) == 0)
                    print_ethaddr("\n\tMAC address: ",
                                  &mac_addr);
                rte_eth_dev_get_name_by_port(port_id, name);
                printf("\n\tDevice name: %s", name);
                printf("\n");
            }
        }
    }
}

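/*
 * Print the full information sheet of a port: MAC address, driver and
 * firmware versions, link state, MTU, offload and RSS capabilities, and
 * the Rx/Tx queue/descriptor limits from rte_eth_dev_info.
 */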
void
port_infos_display(portid_t port_id)
{
    struct rte_port *port;
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    int vlan_offload;
    struct rte_mempool *mp;
    static const char *info_border = "*********************";
    uint16_t mtu;
    char name[RTE_ETH_NAME_MAX_LEN];
    int ret;
    char fw_version[ETHDEV_FWVERS_LEN];

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }
    port = &ports[port_id];
    ret = eth_link_get_nowait_print_err(port_id, &link);
    if (ret < 0)
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    printf("\n%s Infos for port %-2d %s\n",
           info_border, port_id, info_border);
    if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
        print_ethaddr("MAC address: ", &mac_addr);
    rte_eth_dev_get_name_by_port(port_id, name);
    printf("\nDevice name: %s", name);
    printf("\nDriver name: %s", dev_info.driver_name);

    if (rte_eth_dev_fw_version_get(port_id, fw_version,
                                   ETHDEV_FWVERS_LEN) == 0)
        printf("\nFirmware-version: %s", fw_version);
    else
        printf("\nFirmware-version: %s", "not available");

    if (dev_info.device->devargs && dev_info.device->devargs->args)
        printf("\nDevargs: %s", dev_info.device->devargs->args);
    printf("\nConnect to socket: %u", port->socket_id);

    if (port_numa[port_id] != NUMA_NO_CONFIG) {
        mp = mbuf_pool_find(port_numa[port_id]);
        if (mp)
            printf("\nmemory allocation on the socket: %d",
                   port_numa[port_id]);
    } else
        printf("\nmemory allocation on the socket: %u", port->socket_id);

    printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
    printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
    printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
           ("full-duplex") : ("half-duplex"));

    if (!rte_eth_dev_get_mtu(port_id, &mtu))
        printf("MTU: %u\n", mtu);

    printf("Promiscuous mode: %s\n",
           rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
    printf("Allmulticast mode: %s\n",
           rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
    printf("Maximum number of MAC addresses: %u\n",
           (unsigned int)(port->dev_info.max_mac_addrs));
    printf("Maximum number of MAC addresses of hash filtering: %u\n",
           (unsigned int)(port->dev_info.max_hash_mac_addrs));

    vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    if (vlan_offload >= 0) {
        printf("VLAN offload: \n");
        if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
            printf(" strip on, ");
        else
            printf(" strip off, ");

        if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
            printf("filter on, ");
        else
            printf("filter off, ");

        if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
            printf("extend on, ");
        else
            printf("extend off, ");

        if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
            printf("qinq strip on\n");
        else
            printf("qinq strip off\n");
    }

    if (dev_info.hash_key_size > 0)
        printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
    if (dev_info.reta_size > 0)
        printf("Redirection table size: %u\n", dev_info.reta_size);
    if (!dev_info.flow_type_rss_offloads)
        printf("No RSS offload flow type is supported.\n");
    else {
        uint16_t i;
        char *p;

        printf("Supported RSS offload flow types:\n");
        for (i = RTE_ETH_FLOW_UNKNOWN + 1;
             i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
            if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
                continue;
            p = flowtype_to_str(i);
            if (p)
                printf(" %s\n", p);
            else
                printf(" user defined %d\n", i);
        }
    }

    printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
    printf("Maximum configurable length of RX packet: %u\n",
           dev_info.max_rx_pktlen);
    printf("Maximum configurable size of LRO aggregated packet: %u\n",
           dev_info.max_lro_pkt_size);
    if (dev_info.max_vfs)
        printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
    if (dev_info.max_vmdq_pools)
        printf("Maximum number of VMDq pools: %u\n",
               dev_info.max_vmdq_pools);

    printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
    printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
    printf("Max possible number of RXDs per queue: %hu\n",
           dev_info.rx_desc_lim.nb_max);
    printf("Min possible number of RXDs per queue: %hu\n",
           dev_info.rx_desc_lim.nb_min);
    printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

    printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
    printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
    printf("Max possible number of TXDs per queue: %hu\n",
           dev_info.tx_desc_lim.nb_max);
    printf("Min possible number of TXDs per queue: %hu\n",
           dev_info.tx_desc_lim.nb_min);
    printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
    printf("Max segment number per packet: %hu\n",
           dev_info.tx_desc_lim.nb_seg_max);
    printf("Max segment number per MTU/TSO: %hu\n",
           dev_info.tx_desc_lim.nb_mtu_seg_max);

    /* Show switch info only if valid switch domain and port id is set */
    if (dev_info.switch_info.domain_id !=
        RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
        if (dev_info.switch_info.name)
            printf("Switch name: %s\n", dev_info.switch_info.name);

        printf("Switch domain Id: %u\n",
               dev_info.switch_info.domain_id);
        printf("Switch Port Id: %u\n",
               dev_info.switch_info.port_id);
    }
}

void
port_summary_header_display(void)
{
    uint16_t port_number;

    port_number = rte_eth_dev_count_avail();
    printf("Number of available ports: %i\n", port_number);
    printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
           "Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
    struct rte_ether_addr mac_addr;
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    char name[RTE_ETH_NAME_MAX_LEN];
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN)) {
        print_valid_ports();
        return;
    }

    ret = eth_link_get_nowait_print_err(port_id, &link);
    if (ret < 0)
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    rte_eth_dev_get_name_by_port(port_id, name);
    ret = eth_macaddr_get_print_err(port_id, &mac_addr);
    if (ret != 0)
        return;

    printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n",
           port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
           mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
           mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
           dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
           (unsigned int) link.link_speed);
}

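/*
 * For every Rx/Tx offload capability the device reports, print whether
 * that offload is currently enabled in the port configuration.
 */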
void
port_offload_cap_display(portid_t port_id)
{
    struct rte_eth_dev_info dev_info;
    static const char *info_border = "************";
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    printf("\n%s Port %d supported offload features: %s\n",
           info_border, port_id, info_border);

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
        printf("VLAN stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_STRIP)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
        printf("Double VLANs stripped: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_QINQ_STRIP)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
        printf("RX IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
        printf("RX UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
        printf("RX TCP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) {
        printf("RX SCTP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCTP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
        printf("RX Outer IPv4 checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("RX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_OUTER_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
        printf("Large receive offload: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TCP_LRO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
        printf("HW timestamp: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_TIMESTAMP)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
        printf("Rx Keep CRC: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_KEEP_CRC)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) {
        printf("RX offload security: ");
        if (ports[port_id].dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SECURITY)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
        printf("VLAN insert: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_VLAN_INSERT)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
        printf("Double VLANs insert: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_QINQ_INSERT)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
        printf("TX IPv4 checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
        printf("TX UDP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
        printf("TX TCP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_TCP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
        printf("TX SCTP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_SCTP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
        printf("TX Outer IPv4 checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
        printf("TX TCP segmentation: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_TCP_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
        printf("TX UDP segmentation: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
        printf("TSO for VXLAN tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
        printf("TSO for GRE tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GRE_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
        printf("TSO for IPIP tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IPIP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
        printf("TSO for GENEVE tunnel packet: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
        printf("IP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_IP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
        printf("UDP tunnel TSO: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_UDP_TNL_TSO)
            printf("on\n");
        else
            printf("off\n");
    }

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
        printf("TX Outer UDP checksum: ");
        if (ports[port_id].dev_conf.txmode.offloads &
            DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
            printf("on\n");
        else
            printf("off\n");
    }
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
    uint16_t pid;

    if (port_id == (portid_t)RTE_PORT_ALL)
        return 0;

    RTE_ETH_FOREACH_DEV(pid)
        if (port_id == pid)
            return 0;

    if (warning == ENABLED_WARN)
        printf("Invalid port %d\n", port_id);

    return 1;
}

void print_valid_ports(void)
{
    portid_t pid;

    printf("The valid ports array is [");
    RTE_ETH_FOREACH_DEV(pid) {
        printf(" %d", pid);
    }
    printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
    if (vlan_id < 4096)
        return 0;
    printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
    return 1;
}

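/*
 * PCI register access helpers: validate that the port is a PCI device and
 * that the register offset lies within BAR 0 before reading or writing,
 * and that any bit position fits in a 32-bit register.
 */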
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
    const struct rte_pci_device *pci_dev;
    const struct rte_bus *bus;
    uint64_t pci_len;

    if (reg_off & 0x3) {
        printf("Port register offset 0x%X not aligned on a 4-byte "
               "boundary\n",
               (unsigned)reg_off);
        return 1;
    }

    if (!ports[port_id].dev_info.device) {
        printf("Invalid device\n");
        return 0;
    }

    bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
    if (bus && !strcmp(bus->name, "pci")) {
        pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
    } else {
        printf("Not a PCI device\n");
        return 1;
    }

    pci_len = pci_dev->mem_resource[0].len;
    if (reg_off >= pci_len) {
        printf("Port %d: register offset %u (0x%X) out of port PCI "
               "resource (length=%"PRIu64")\n",
               port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
        return 1;
    }
    return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
    if (bit_pos <= 31)
        return 0;
    printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
    return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
    printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit_x))
        return;
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
                           uint8_t bit1_pos, uint8_t bit2_pos)
{
    uint32_t reg_v;
    uint8_t l_bit;
    uint8_t h_bit;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit1_pos))
        return;
    if (reg_bit_pos_is_invalid(bit2_pos))
        return;
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
    else
        l_bit = bit1_pos, h_bit = bit2_pos;

    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v >>= l_bit;
    if (h_bit < 31)
        reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
    display_port_and_reg_off(port_id, (unsigned)reg_off);
    printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
           ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
                 uint8_t bit_v)
{
    uint32_t reg_v;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit_pos))
        return;
    if (bit_v > 1) {
        printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
        return;
    }
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    if (bit_v == 0)
        reg_v &= ~(1 << bit_pos);
    else
        reg_v |= (1 << bit_pos);
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
                       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
    uint32_t max_v;
    uint32_t reg_v;
    uint8_t l_bit;
    uint8_t h_bit;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    if (reg_bit_pos_is_invalid(bit1_pos))
        return;
    if (reg_bit_pos_is_invalid(bit2_pos))
        return;
    if (bit1_pos > bit2_pos)
        l_bit = bit2_pos, h_bit = bit1_pos;
    else
        l_bit = bit1_pos, h_bit = bit2_pos;

    if ((h_bit - l_bit) < 31)
        max_v = (1 << (h_bit - l_bit + 1)) - 1;
    else
        max_v = 0xFFFFFFFF;

    if (value > max_v) {
        printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
               (unsigned)value, (unsigned)value,
               (unsigned)max_v, (unsigned)max_v);
        return;
    }
    reg_v = port_id_pci_reg_read(port_id, reg_off);
    reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
    reg_v |= (value << l_bit); /* Set changed bits */
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;
    if (port_reg_off_is_invalid(port_id, reg_off))
        return;
    port_id_pci_reg_write(port_id, reg_off, reg_v);
    display_port_reg_value(port_id, reg_off, reg_v);
}

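/*
 * Set the MTU of a port. When the new MTU needs frames larger than
 * RTE_ETHER_MAX_LEN and the device can do jumbo frames, the jumbo-frame
 * Rx offload and max_rx_pkt_len are updated accordingly.
 */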
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
    int diag;
    struct rte_port *rte_port = &ports[port_id];
    struct rte_eth_dev_info dev_info;
    uint16_t eth_overhead;
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN))
        return;

    ret = eth_dev_info_get_print_err(port_id, &dev_info);
    if (ret != 0)
        return;

    if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
        printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
               mtu, dev_info.min_mtu, dev_info.max_mtu);
        return;
    }
    diag = rte_eth_dev_set_mtu(port_id, mtu);
    if (diag == 0 &&
        dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
        /*
         * Ether overhead in driver is equal to the difference of
         * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
         * device supports jumbo frame.
         */
        eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
        if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) {
            rte_port->dev_conf.rxmode.offloads |=
                DEV_RX_OFFLOAD_JUMBO_FRAME;
            rte_port->dev_conf.rxmode.max_rx_pkt_len =
                mtu + eth_overhead;
        } else
            rte_port->dev_conf.rxmode.offloads &=
                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        return;
    }
    printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
              const struct rte_flow_item *pattern,
              const struct rte_flow_action *actions,
              struct rte_flow_error *error)
{
    const struct rte_flow_conv_rule rule = {
        .attr_ro = attr,
        .pattern_ro = pattern,
        .actions_ro = actions,
    };
    struct port_flow *pf;
    int ret;

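    /*
     * The first conversion call computes the amount of memory the rule
     * needs; the second call below copies attr/pattern/actions into the
     * single allocation tailing the port_flow structure.
     */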
    ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
    if (ret < 0)
        return NULL;
    pf = calloc(1, offsetof(struct port_flow, rule) + ret);
    if (!pf) {
        rte_flow_error_set
            (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
             "calloc() failed");
        return NULL;
    }
    if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
                      error) >= 0)
        return pf;
    free(pf);
    return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
    static const char *const errstrlist[] = {
        [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
        [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
        [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
        [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
        [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
        [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
        [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
        [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
        [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
        [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
        [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
        [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
        [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
        [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
        [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
        [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
    };
    const char *errstr;
    char buf[32];
    int err = rte_errno;

    if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
        !errstrlist[error->type])
        errstr = "unknown type";
    else
        errstr = errstrlist[error->type];
    printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
           error->type, errstr,
           error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
                                    error->cause), buf) : "",
           error->message ? error->message : "(no stated reason)",
           rte_strerror(err));
    return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item *pattern,
                   const struct rte_flow_action *actions)
{
    struct rte_flow_error error;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x11, sizeof(error));
    if (rte_flow_validate(port_id, attr, pattern, actions, &error))
        return port_flow_complain(&error);
    printf("Flow rule validated\n");
    return 0;
}

/** Update age action context by port_flow pointer. */
void
update_age_action_context(const struct rte_flow_action *actions,
                          struct port_flow *pf)
{
    struct rte_flow_action_age *age = NULL;

    for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
        switch (actions->type) {
        case RTE_FLOW_ACTION_TYPE_AGE:
            age = (struct rte_flow_action_age *)
                (uintptr_t)actions->conf;
            age->context = pf;
            return;
        default:
            break;
        }
    }
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item *pattern,
                 const struct rte_flow_action *actions)
{
    struct rte_flow *flow;
    struct rte_port *port;
    struct port_flow *pf;
    uint32_t id = 0;
    struct rte_flow_error error;

    port = &ports[port_id];
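    /* Flows are prepended to the list, so the head holds the highest ID. */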
    if (port->flow_list) {
        if (port->flow_list->id == UINT32_MAX) {
            printf("Highest rule ID is already assigned, delete"
                   " it first");
            return -ENOMEM;
        }
        id = port->flow_list->id + 1;
    }
    pf = port_flow_new(attr, pattern, actions, &error);
    if (!pf)
        return port_flow_complain(&error);
    update_age_action_context(actions, pf);
    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x22, sizeof(error));
    flow = rte_flow_create(port_id, attr, pattern, actions, &error);
    if (!flow) {
        free(pf);
        return port_flow_complain(&error);
    }
    pf->next = port->flow_list;
    pf->id = id;
    pf->flow = flow;
    port->flow_list = pf;
    printf("Flow rule #%u created\n", pf->id);
    return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
    struct rte_port *port;
    struct port_flow **tmp;
    uint32_t c = 0;
    int ret = 0;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return -EINVAL;
    port = &ports[port_id];
    tmp = &port->flow_list;
    while (*tmp) {
        uint32_t i;

        for (i = 0; i != n; ++i) {
            struct rte_flow_error error;
            struct port_flow *pf = *tmp;

            if (rule[i] != pf->id)
                continue;
            /*
             * Poisoning to make sure PMDs update it in case
             * of error.
             */
            memset(&error, 0x33, sizeof(error));
            if (rte_flow_destroy(port_id, pf->flow, &error)) {
                ret = port_flow_complain(&error);
                continue;
            }
            printf("Flow rule #%u destroyed\n", pf->id);
            *tmp = pf->next;
            free(pf);
            break;
        }
        if (i == n)
            tmp = &(*tmp)->next;
        ++c;
    }
    return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
    struct rte_flow_error error;
    struct rte_port *port;
    int ret = 0;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x44, sizeof(error));
    if (rte_flow_flush(port_id, &error)) {
        ret = port_flow_complain(&error);
        if (port_id_is_invalid(port_id, DISABLED_WARN) ||
            port_id == (portid_t)RTE_PORT_ALL)
            return ret;
    }
    port = &ports[port_id];
    while (port->flow_list) {
        struct port_flow *pf = port->flow_list->next;

        free(port->flow_list);
        port->flow_list = pf;
    }
    return ret;
}

/** Dump all flow rules. */
int
port_flow_dump(portid_t port_id, const char *file_name)
{
    int ret = 0;
    FILE *file = stdout;
    struct rte_flow_error error;

    if (file_name && strlen(file_name)) {
        file = fopen(file_name, "w");
        if (!file) {
            printf("Failed to create file %s: %s\n", file_name,
                   strerror(errno));
            return -errno;
        }
    }
    ret = rte_flow_dev_dump(port_id, file, &error);
    if (ret) {
        port_flow_complain(&error);
        printf("Failed to dump flow: %s\n", strerror(-ret));
    } else
        printf("Flow dump finished\n");
    if (file_name && strlen(file_name))
        fclose(file);
    return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
                const struct rte_flow_action *action)
{
    struct rte_flow_error error;
    struct rte_port *port;
    struct port_flow *pf;
    const char *name;
    union {
        struct rte_flow_query_count count;
    } query;
    int ret;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return -EINVAL;
    port = &ports[port_id];
    for (pf = port->flow_list; pf; pf = pf->next)
        if (pf->id == rule)
            break;
    if (!pf) {
        printf("Flow rule #%u not found\n", rule);
        return -ENOENT;
    }
    ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
                        &name, sizeof(name),
                        (void *)(uintptr_t)action->type, &error);
    if (ret < 0)
        return port_flow_complain(&error);
    switch (action->type) {
    case RTE_FLOW_ACTION_TYPE_COUNT:
        break;
    default:
        printf("Cannot query action type %d (%s)\n",
               action->type, name);
        return -ENOTSUP;
    }
    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x55, sizeof(error));
    memset(&query, 0, sizeof(query));
    if (rte_flow_query(port_id, pf->flow, action, &query, &error))
        return port_flow_complain(&error);
    switch (action->type) {
    case RTE_FLOW_ACTION_TYPE_COUNT:
        printf("%s:\n"
               " hits_set: %u\n"
               " bytes_set: %u\n"
               " hits: %" PRIu64 "\n"
               " bytes: %" PRIu64 "\n",
               name,
               query.count.hits_set,
               query.count.bytes_set,
               query.count.hits,
               query.count.bytes);
        break;
    default:
        printf("Cannot display result for action type %d (%s)\n",
               action->type, name);
        break;
    }
    return 0;
}

/** List and optionally destroy all aged flows. */
void
port_flow_aged(portid_t port_id, uint8_t destroy)
{
    void **contexts;
    int nb_context, total = 0, idx;
    struct rte_flow_error error;
    struct port_flow *pf;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return;
    total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
    printf("Port %u total aged flows: %d\n", port_id, total);
    if (total < 0) {
        port_flow_complain(&error);
        return;
    }
    if (total == 0)
        return;
    contexts = malloc(sizeof(void *) * total);
    if (contexts == NULL) {
        printf("Cannot allocate contexts for aged flow\n");
        return;
    }
    printf("ID\tGroup\tPrio\tAttr\n");
    nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
    if (nb_context != total) {
        printf("Port:%d get aged flows count(%d) != total(%d)\n",
               port_id, nb_context, total);
        free(contexts);
        return;
    }
    for (idx = 0; idx < nb_context; idx++) {
        pf = (struct port_flow *)contexts[idx];
        if (!pf) {
            printf("Error: get Null context in port %u\n", port_id);
            continue;
        }
        printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n",
               pf->id,
               pf->rule.attr->group,
               pf->rule.attr->priority,
               pf->rule.attr->ingress ? 'i' : '-',
               pf->rule.attr->egress ? 'e' : '-',
               pf->rule.attr->transfer ? 't' : '-');
    }
    if (destroy) {
        int ret;
        uint32_t flow_id;

        total = 0;
        printf("\n");
        for (idx = 0; idx < nb_context; idx++) {
            pf = (struct port_flow *)contexts[idx];
            if (!pf)
                continue;
            flow_id = pf->id;
            ret = port_flow_destroy(port_id, 1, &flow_id);
            if (!ret)
                total++;
        }
        printf("%d flows destroyed\n", total);
    }
    free(contexts);
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
    struct rte_port *port;
    struct port_flow *pf;
    struct port_flow *list = NULL;
    uint32_t i;

    if (port_id_is_invalid(port_id, ENABLED_WARN) ||
        port_id == (portid_t)RTE_PORT_ALL)
        return;
    port = &ports[port_id];
    if (!port->flow_list)
        return;
    /* Sort flows by group, priority and ID. */
    for (pf = port->flow_list; pf != NULL; pf = pf->next) {
        struct port_flow **tmp;
        const struct rte_flow_attr *curr = pf->rule.attr;

        if (n) {
            /* Filter out unwanted groups. */
            for (i = 0; i != n; ++i)
                if (curr->group == group[i])
                    break;
            if (i == n)
                continue;
        }
        for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
            const struct rte_flow_attr *comp = (*tmp)->rule.attr;

            if (curr->group > comp->group ||
                (curr->group == comp->group &&
                 curr->priority > comp->priority) ||
                (curr->group == comp->group &&
                 curr->priority == comp->priority &&
                 pf->id > (*tmp)->id))
                continue;
            break;
        }
        pf->tmp = *tmp;
        *tmp = pf;
    }
    printf("ID\tGroup\tPrio\tAttr\tRule\n");
    for (pf = list; pf != NULL; pf = pf->tmp) {
        const struct rte_flow_item *item = pf->rule.pattern;
        const struct rte_flow_action *action = pf->rule.actions;
        const char *name;

        printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
               pf->id,
               pf->rule.attr->group,
               pf->rule.attr->priority,
               pf->rule.attr->ingress ? 'i' : '-',
               pf->rule.attr->egress ? 'e' : '-',
               pf->rule.attr->transfer ? 't' : '-');
        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
            if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
                              &name, sizeof(name),
                              (void *)(uintptr_t)item->type,
                              NULL) <= 0)
                name = "[UNKNOWN]";
            if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
                printf("%s ", name);
            ++item;
        }
        printf("=>");
        while (action->type != RTE_FLOW_ACTION_TYPE_END) {
            if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
                              &name, sizeof(name),
                              (void *)(uintptr_t)action->type,
                              NULL) <= 0)
                name = "[UNKNOWN]";
            if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
                printf(" %s", name);
            ++action;
        }
        printf("\n");
    }
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
    struct rte_flow_error error;

    /* Poisoning to make sure PMDs update it in case of error. */
    memset(&error, 0x66, sizeof(error));
    if (rte_flow_isolate(port_id, set, &error))
        return port_flow_complain(&error);
    printf("Ingress traffic on port %u is %s to the defined flow rules\n",
           port_id,
           set ? "now restricted" : "not restricted anymore");
    return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
    if (rxq_id < nb_rxq)
        return 0;
    printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
    return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
    if (txq_id < nb_txq)
        return 0;
    printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
    return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
    if (rxdesc_id < nb_rxd)
        return 0;
    printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
           rxdesc_id, nb_rxd);
    return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
    if (txdesc_id < nb_txd)
        return 0;
    printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
           txdesc_id, nb_txd);
    return 1;
}

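/*
 * Look up the descriptor ring of a queue by the memzone naming convention
 * used by the PMDs: "eth_p<port>_q<queue>_<ring_name>".
 */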
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
    char mz_name[RTE_MEMZONE_NAMESIZE];
    const struct rte_memzone *mz;

    snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
             port_id, q_id, ring_name);
    mz = rte_memzone_lookup(mz_name);
    if (mz == NULL)
        printf("%s ring memory zone of (port %d, queue %d) not "
               "found (zone name = %s)\n",
               ring_name, port_id, q_id, mz_name);
    return mz;
}

union igb_ring_dword {
    uint64_t dword;
    struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        uint32_t lo;
        uint32_t hi;
#else
        uint32_t hi;
        uint32_t lo;
#endif
    } words;
};

struct igb_ring_desc_32_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;
    union igb_ring_dword resv1;
    union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
    union igb_ring_dword lo_dword;
    union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
    printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
           (unsigned)dword.words.hi);
}

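/*
 * Dump one Rx descriptor. Descriptors are 16 bytes on most PMDs; i40e
 * uses 32-byte descriptors (unless built with 16-byte descriptors), so
 * the driver name decides how many words are printed.
 */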
1869 static void
1870 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
1871 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1872 portid_t port_id,
1873 #else
1874 __rte_unused portid_t port_id,
1875 #endif
1876 uint16_t desc_id)
1877 {
1878 struct igb_ring_desc_16_bytes *ring =
1879 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1880 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
1881 int ret;
1882 struct rte_eth_dev_info dev_info;
1883
1884 ret = eth_dev_info_get_print_err(port_id, &dev_info);
1885 if (ret != 0)
1886 return;
1887
1888 if (strstr(dev_info.driver_name, "i40e") != NULL) {
1889 /* 32 bytes RX descriptor, i40e only */
1890 struct igb_ring_desc_32_bytes *ring =
1891 (struct igb_ring_desc_32_bytes *)ring_mz->addr;
1892 ring[desc_id].lo_dword.dword =
1893 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1894 ring_rxd_display_dword(ring[desc_id].lo_dword);
1895 ring[desc_id].hi_dword.dword =
1896 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1897 ring_rxd_display_dword(ring[desc_id].hi_dword);
1898 ring[desc_id].resv1.dword =
1899 rte_le_to_cpu_64(ring[desc_id].resv1.dword);
1900 ring_rxd_display_dword(ring[desc_id].resv1);
1901 ring[desc_id].resv2.dword =
1902 rte_le_to_cpu_64(ring[desc_id].resv2.dword);
1903 ring_rxd_display_dword(ring[desc_id].resv2);
1904
1905 return;
1906 }
1907 #endif
1908 /* 16 bytes RX descriptor */
1909 ring[desc_id].lo_dword.dword =
1910 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1911 ring_rxd_display_dword(ring[desc_id].lo_dword);
1912 ring[desc_id].hi_dword.dword =
1913 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1914 ring_rxd_display_dword(ring[desc_id].hi_dword);
1915 }
1916
1917 static void
1918 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1919 {
1920 struct igb_ring_desc_16_bytes *ring;
1921 struct igb_ring_desc_16_bytes txd;
1922
1923 ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1924 txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1925 txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1926 printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1927 (unsigned)txd.lo_dword.words.lo,
1928 (unsigned)txd.lo_dword.words.hi,
1929 (unsigned)txd.hi_dword.words.lo,
1930 (unsigned)txd.hi_dword.words.hi);
1931 }
1932
1933 void
1934 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1935 {
1936 const struct rte_memzone *rx_mz;
1937
1938 if (port_id_is_invalid(port_id, ENABLED_WARN))
1939 return;
1940 if (rx_queue_id_is_invalid(rxq_id))
1941 return;
1942 if (rx_desc_id_is_invalid(rxd_id))
1943 return;
1944 rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1945 if (rx_mz == NULL)
1946 return;
1947 ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1948 }
1949
1950 void
1951 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1952 {
1953 const struct rte_memzone *tx_mz;
1954
1955 if (port_id_is_invalid(port_id, ENABLED_WARN))
1956 return;
1957 if (tx_queue_id_is_invalid(txq_id))
1958 return;
1959 if (tx_desc_id_is_invalid(txd_id))
1960 return;
1961 tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1962 if (tx_mz == NULL)
1963 return;
1964 ring_tx_descriptor_display(tx_mz, txd_id);
1965 }
1966
1967 void
1968 fwd_lcores_config_display(void)
1969 {
1970 lcoreid_t lc_id;
1971
1972 printf("List of forwarding lcores:");
1973 for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1974 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1975 printf("\n");
1976 }
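
/*
 * Display the current forwarding configuration: engine, burst size,
 * core/port counts, per-port offloads, and the settings of the first
 * RX and TX queue of each port (only one queue to limit verbosity).
 */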
1977 void
1978 rxtx_config_display(void)
1979 {
1980 portid_t pid;
1981 queueid_t qid;
1982
1983 printf(" %s packet forwarding%s - packets/burst=%d\n",
1984 cur_fwd_eng->fwd_mode_name,
1985 retry_enabled == 0 ? "" : " with retry",
1986 nb_pkt_per_burst);
1987
1988 if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
1989 printf(" packet len=%u - nb packet segments=%d\n",
1990 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
1991
1992 printf(" nb forwarding cores=%d - nb forwarding ports=%d\n",
1993 nb_fwd_lcores, nb_fwd_ports);
1994
1995 RTE_ETH_FOREACH_DEV(pid) {
1996 struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
1997 struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
1998 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
1999 uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2000 uint16_t nb_rx_desc_tmp;
2001 uint16_t nb_tx_desc_tmp;
2002 struct rte_eth_rxq_info rx_qinfo;
2003 struct rte_eth_txq_info tx_qinfo;
2004 int32_t rc;
2005
2006 /* per port config */
2007 printf(" port %d: RX queue number: %d TX queue number: %d\n",
2008 (unsigned int)pid, nb_rxq, nb_txq);
2009
2010 printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2011 ports[pid].dev_conf.rxmode.offloads,
2012 ports[pid].dev_conf.txmode.offloads);
2013
2014 /* per-RX-queue config, shown for the first queue only to limit verbosity */
2015 for (qid = 0; qid < 1; qid++) {
2016 rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2017 if (rc)
2018 nb_rx_desc_tmp = nb_rx_desc[qid];
2019 else
2020 nb_rx_desc_tmp = rx_qinfo.nb_desc;
2021
2022 printf(" RX queue: %d\n", qid);
2023 printf(" RX desc=%d - RX free threshold=%d\n",
2024 nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
2025 printf(" RX threshold registers: pthresh=%d hthresh=%d"
2026 " wthresh=%d\n",
2027 rx_conf[qid].rx_thresh.pthresh,
2028 rx_conf[qid].rx_thresh.hthresh,
2029 rx_conf[qid].rx_thresh.wthresh);
2030 printf(" RX Offloads=0x%"PRIx64"\n",
2031 rx_conf[qid].offloads);
2032 }
2033
2034 /* per-TX-queue config, shown for the first queue only to limit verbosity */
2035 for (qid = 0; qid < 1; qid++) {
2036 rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2037 if (rc)
2038 nb_tx_desc_tmp = nb_tx_desc[qid];
2039 else
2040 nb_tx_desc_tmp = tx_qinfo.nb_desc;
2041
2042 printf(" TX queue: %d\n", qid);
2043 printf(" TX desc=%d - TX free threshold=%d\n",
2044 nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
2045 printf(" TX threshold registers: pthresh=%d hthresh=%d"
2046 " wthresh=%d\n",
2047 tx_conf[qid].tx_thresh.pthresh,
2048 tx_conf[qid].tx_thresh.hthresh,
2049 tx_conf[qid].tx_thresh.wthresh);
2050 printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2051 tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
2052 }
2053 }
2054 }
2055
2056 void
2057 port_rss_reta_info(portid_t port_id,
2058 struct rte_eth_rss_reta_entry64 *reta_conf,
2059 uint16_t nb_entries)
2060 {
2061 uint16_t i, idx, shift;
2062 int ret;
2063
2064 if (port_id_is_invalid(port_id, ENABLED_WARN))
2065 return;
2066
2067 ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2068 if (ret != 0) {
2069 printf("Failed to get RSS RETA info, return code = %d\n", ret);
2070 return;
2071 }
2072
2073 for (i = 0; i < nb_entries; i++) {
2074 idx = i / RTE_RETA_GROUP_SIZE;
2075 shift = i % RTE_RETA_GROUP_SIZE;
2076 if (!(reta_conf[idx].mask & (1ULL << shift)))
2077 continue;
2078 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2079 i, reta_conf[idx].reta[shift]);
2080 }
2081 }
2082
2083 /*
2084 * Displays the RSS hash functions of a port and, optionally, the RSS hash
2085 * key of the port.
2086 */
2087 void
2088 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2089 {
2090 struct rte_eth_rss_conf rss_conf = {0};
2091 uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2092 uint64_t rss_hf;
2093 uint8_t i;
2094 int diag;
2095 struct rte_eth_dev_info dev_info;
2096 uint8_t hash_key_size;
2097 int ret;
2098
2099 if (port_id_is_invalid(port_id, ENABLED_WARN))
2100 return;
2101
2102 ret = eth_dev_info_get_print_err(port_id, &dev_info);
2103 if (ret != 0)
2104 return;
2105
2106 if (dev_info.hash_key_size > 0 &&
2107 dev_info.hash_key_size <= sizeof(rss_key))
2108 hash_key_size = dev_info.hash_key_size;
2109 else {
2110 printf("dev_info did not provide a valid hash key size\n");
2111 return;
2112 }
2113
2114 /* Get RSS hash key if asked to display it */
2115 rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2116 rss_conf.rss_key_len = hash_key_size;
2117 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2118 if (diag != 0) {
2119 switch (diag) {
2120 case -ENODEV:
2121 printf("port index %d invalid\n", port_id);
2122 break;
2123 case -ENOTSUP:
2124 printf("operation not supported by device\n");
2125 break;
2126 default:
2127 printf("operation failed - diag=%d\n", diag);
2128 break;
2129 }
2130 return;
2131 }
2132 rss_hf = rss_conf.rss_hf;
2133 if (rss_hf == 0) {
2134 printf("RSS disabled\n");
2135 return;
2136 }
2137 printf("RSS functions:\n ");
2138 for (i = 0; rss_type_table[i].str; i++) {
2139 if (rss_hf & rss_type_table[i].rss_type)
2140 printf("%s ", rss_type_table[i].str);
2141 }
2142 printf("\n");
2143 if (!show_rss_key)
2144 return;
2145 printf("RSS key:\n");
2146 for (i = 0; i < hash_key_size; i++)
2147 printf("%02X", rss_key[i]);
2148 printf("\n");
2149 }
2150
2151 void
2152 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2153 unsigned int hash_key_len)
2154 {
2155 struct rte_eth_rss_conf rss_conf;
2156 int diag;
2157 unsigned int i;
2158
2159 rss_conf.rss_key = NULL;
2160 rss_conf.rss_key_len = hash_key_len;
2161 rss_conf.rss_hf = 0;
2162 for (i = 0; rss_type_table[i].str; i++) {
2163 if (!strcmp(rss_type_table[i].str, rss_type))
2164 rss_conf.rss_hf = rss_type_table[i].rss_type;
2165 }
2166 diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2167 if (diag == 0) {
2168 rss_conf.rss_key = hash_key;
2169 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2170 }
2171 if (diag == 0)
2172 return;
2173
2174 switch (diag) {
2175 case -ENODEV:
2176 printf("port index %d invalid\n", port_id);
2177 break;
2178 case -ENOTSUP:
2179 printf("operation not supported by device\n");
2180 break;
2181 default:
2182 printf("operation failed - diag=%d\n", diag);
2183 break;
2184 }
2185 }
2186
2187 /*
2188 * Setup forwarding configuration for each logical core.
2189 */
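/*
 * Streams are split as evenly as possible. For example, with
 * nb_fwd_streams = 10 and nb_fwd_lcores = 4: 10 / 4 = 2 streams per
 * lcore and 10 % 4 = 2 extras, so lcores 0 and 1 get streams 0-1 and
 * 2-3, while lcores 2 and 3 take 3 streams each (4-6 and 7-9).
 */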
2190 static void
2191 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2192 {
2193 streamid_t nb_fs_per_lcore;
2194 streamid_t nb_fs;
2195 streamid_t sm_id;
2196 lcoreid_t nb_extra;
2197 lcoreid_t nb_fc;
2198 lcoreid_t nb_lc;
2199 lcoreid_t lc_id;
2200
2201 nb_fs = cfg->nb_fwd_streams;
2202 nb_fc = cfg->nb_fwd_lcores;
2203 if (nb_fs <= nb_fc) {
2204 nb_fs_per_lcore = 1;
2205 nb_extra = 0;
2206 } else {
2207 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2208 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2209 }
2210
2211 nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2212 sm_id = 0;
2213 for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2214 fwd_lcores[lc_id]->stream_idx = sm_id;
2215 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2216 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2217 }
2218
2219 /*
2220 * Assign extra remaining streams, if any.
2221 */
2222 nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2223 for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2224 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2225 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2226 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2227 }
2228 }
2229
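/*
 * Map an RX port index to its TX peer according to the configured
 * topology: "paired" pairs ports 0<->1, 2<->3, ... (the last port of
 * an odd set pairs with itself), "chained" forwards port i to
 * (i + 1) % nb_fwd_ports, and "loop" sends back on the RX port.
 */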
2230 static portid_t
2231 fwd_topology_tx_port_get(portid_t rxp)
2232 {
2233 static int warning_once = 1;
2234
2235 RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2236
2237 switch (port_topology) {
2238 default:
2239 case PORT_TOPOLOGY_PAIRED:
2240 if ((rxp & 0x1) == 0) {
2241 if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2242 return rxp + 1;
2243 if (warning_once) {
2244 printf("\nWarning! port-topology=paired"
2245 " and an odd number of forwarding ports;"
2246 " the last port will pair with"
2247 " itself.\n\n");
2248 warning_once = 0;
2249 }
2250 return rxp;
2251 }
2252 return rxp - 1;
2253 case PORT_TOPOLOGY_CHAINED:
2254 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2255 case PORT_TOPOLOGY_LOOP:
2256 return rxp;
2257 }
2258 }
2259
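/*
 * One forwarding stream per port, all on queue 0; selected by
 * fwd_config_setup() when multiple RX/TX queues are not in use.
 */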
2260 static void
2261 simple_fwd_config_setup(void)
2262 {
2263 portid_t i;
2264
2265 cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2266 cur_fwd_config.nb_fwd_streams =
2267 (streamid_t) cur_fwd_config.nb_fwd_ports;
2268
2269 /* reinitialize forwarding streams */
2270 init_fwd_streams();
2271
2272 /*
2273 * In the simple forwarding test, the number of forwarding cores
2274 * must be lower or equal to the number of forwarding ports.
2275 */
2276 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2277 if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2278 cur_fwd_config.nb_fwd_lcores =
2279 (lcoreid_t) cur_fwd_config.nb_fwd_ports;
2280 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2281
2282 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2283 fwd_streams[i]->rx_port = fwd_ports_ids[i];
2284 fwd_streams[i]->rx_queue = 0;
2285 fwd_streams[i]->tx_port =
2286 fwd_ports_ids[fwd_topology_tx_port_get(i)];
2287 fwd_streams[i]->tx_queue = 0;
2288 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2289 fwd_streams[i]->retry_enabled = retry_enabled;
2290 }
2291 }
2292
2293 /**
2294 * For the RSS forwarding test, all streams are distributed over the lcores.
2295 * Each stream is composed of an RX queue to poll on an RX port for input
2296 * packets, associated with a TX queue of a TX port to send forwarded packets.
2297 */
2298 static void
2299 rss_fwd_config_setup(void)
2300 {
2301 portid_t rxp;
2302 portid_t txp;
2303 queueid_t rxq;
2304 queueid_t nb_q;
2305 streamid_t sm_id;
2306
2307 nb_q = nb_rxq;
2308 if (nb_q > nb_txq)
2309 nb_q = nb_txq;
2310 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2311 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2312 cur_fwd_config.nb_fwd_streams =
2313 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2314
2315 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2316 cur_fwd_config.nb_fwd_lcores =
2317 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2318
2319 /* reinitialize forwarding streams */
2320 init_fwd_streams();
2321
2322 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2323 rxp = 0; rxq = 0;
2324 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2325 struct fwd_stream *fs;
2326
2327 fs = fwd_streams[sm_id];
2328 txp = fwd_topology_tx_port_get(rxp);
2329 fs->rx_port = fwd_ports_ids[rxp];
2330 fs->rx_queue = rxq;
2331 fs->tx_port = fwd_ports_ids[txp];
2332 fs->tx_queue = rxq;
2333 fs->peer_addr = fs->tx_port;
2334 fs->retry_enabled = retry_enabled;
2335 rxp++;
2336 if (rxp < nb_fwd_ports)
2337 continue;
2338 rxp = 0;
2339 rxq++;
2340 }
2341 }
2342
2343 /**
2344 * For the DCB forwarding test, each core is assigned to a traffic class.
2345 *
2346 * Each core is assigned a group of streams, each stream being composed of
2347 * an RX queue to poll on an RX port for input packets, associated with
2348 * a TX queue of a TX port to send forwarded packets. All RX and
2349 * TX queues of a stream map to the same traffic class.
2350 * If VMDQ and DCB co-exist, the traffic classes of the different pools
2351 * share the same core.
2352 */
2353 static void
2354 dcb_fwd_config_setup(void)
2355 {
2356 struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2357 portid_t txp, rxp = 0;
2358 queueid_t txq, rxq = 0;
2359 lcoreid_t lc_id;
2360 uint16_t nb_rx_queue, nb_tx_queue;
2361 uint16_t i, j, k, sm_id = 0;
2362 uint8_t tc = 0;
2363
2364 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2365 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2366 cur_fwd_config.nb_fwd_streams =
2367 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2368
2369 /* reinitialize forwarding streams */
2370 init_fwd_streams();
2371 sm_id = 0;
2372 txp = 1;
2373 /* get the dcb info on the first RX and TX ports */
2374 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2375 (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2376
2377 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2378 fwd_lcores[lc_id]->stream_nb = 0;
2379 fwd_lcores[lc_id]->stream_idx = sm_id;
2380 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2381 /* if nb_queue is zero, it means this TC is
2382 * not enabled on the pool
2383 */
2384 if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2385 break;
2386 k = fwd_lcores[lc_id]->stream_nb +
2387 fwd_lcores[lc_id]->stream_idx;
2388 rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2389 txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2390 nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2391 nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2392 for (j = 0; j < nb_rx_queue; j++) {
2393 struct fwd_stream *fs;
2394
2395 fs = fwd_streams[k + j];
2396 fs->rx_port = fwd_ports_ids[rxp];
2397 fs->rx_queue = rxq + j;
2398 fs->tx_port = fwd_ports_ids[txp];
2399 fs->tx_queue = txq + j % nb_tx_queue;
2400 fs->peer_addr = fs->tx_port;
2401 fs->retry_enabled = retry_enabled;
2402 }
2403 fwd_lcores[lc_id]->stream_nb +=
2404 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2405 }
2406 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2407
2408 tc++;
2409 if (tc < rxp_dcb_info.nb_tcs)
2410 continue;
2411 /* Restart from TC 0 on next RX port */
2412 tc = 0;
2413 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2414 rxp = (portid_t)
2415 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2416 else
2417 rxp++;
2418 if (rxp >= nb_fwd_ports)
2419 return;
2420 /* get the dcb information on next RX and TX ports */
2421 if ((rxp & 0x1) == 0)
2422 txp = (portid_t) (rxp + 1);
2423 else
2424 txp = (portid_t) (rxp - 1);
2425 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2426 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2427 }
2428 }
2429
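/*
 * ICMP echo replies are sent back on the port and queue the request
 * arrived on, so each stream uses the same port and queue for RX and
 * TX.
 */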
2430 static void
2431 icmp_echo_config_setup(void)
2432 {
2433 portid_t rxp;
2434 queueid_t rxq;
2435 lcoreid_t lc_id;
2436 uint16_t sm_id;
2437
2438 if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2439 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2440 (nb_txq * nb_fwd_ports);
2441 else
2442 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2443 cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2444 cur_fwd_config.nb_fwd_streams =
2445 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2446 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2447 cur_fwd_config.nb_fwd_lcores =
2448 (lcoreid_t)cur_fwd_config.nb_fwd_streams;
2449 if (verbose_level > 0) {
2450 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2451 __func__,
2452 cur_fwd_config.nb_fwd_lcores,
2453 cur_fwd_config.nb_fwd_ports,
2454 cur_fwd_config.nb_fwd_streams);
2455 }
2456
2457 /* reinitialize forwarding streams */
2458 init_fwd_streams();
2459 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2460 rxp = 0; rxq = 0;
2461 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2462 if (verbose_level > 0)
2463 printf(" core=%d:\n", lc_id);
2464 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2465 struct fwd_stream *fs;
2466 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2467 fs->rx_port = fwd_ports_ids[rxp];
2468 fs->rx_queue = rxq;
2469 fs->tx_port = fs->rx_port;
2470 fs->tx_queue = rxq;
2471 fs->peer_addr = fs->tx_port;
2472 fs->retry_enabled = retry_enabled;
2473 if (verbose_level > 0)
2474 printf(" stream=%d port=%d rxq=%d txq=%d\n",
2475 sm_id, fs->rx_port, fs->rx_queue,
2476 fs->tx_queue);
2477 rxq = (queueid_t) (rxq + 1);
2478 if (rxq == nb_rxq) {
2479 rxq = 0;
2480 rxp = (portid_t) (rxp + 1);
2481 }
2482 }
2483 }
2484 }
2485
2486 #if defined RTE_LIBRTE_PMD_SOFTNIC
2487 static void
2488 softnic_fwd_config_setup(void)
2489 {
2490 struct rte_port *port;
2491 portid_t pid, softnic_portid;
2492 queueid_t i;
2493 uint8_t softnic_enable = 0;
2494
2495 RTE_ETH_FOREACH_DEV(pid) {
2496 port = &ports[pid];
2497 const char *driver = port->dev_info.driver_name;
2498
2499 if (strcmp(driver, "net_softnic") == 0) {
2500 softnic_portid = pid;
2501 softnic_enable = 1;
2502 break;
2503 }
2504 }
2505
2506 if (softnic_enable == 0) {
2507 printf("Softnic mode not configured (%s)!\n", __func__);
2508 return;
2509 }
2510
2511 cur_fwd_config.nb_fwd_ports = 1;
2512 cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
2513
2514 /* Re-initialize forwarding streams */
2515 init_fwd_streams();
2516
2517 /*
2518 * In the softnic forwarding test, the number of forwarding cores
2519 * is set to one and remaining are used for softnic packet processing.
2520 */
2521 cur_fwd_config.nb_fwd_lcores = 1;
2522 setup_fwd_config_of_each_lcore(&cur_fwd_config);
2523
2524 for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
2525 fwd_streams[i]->rx_port = softnic_portid;
2526 fwd_streams[i]->rx_queue = i;
2527 fwd_streams[i]->tx_port = softnic_portid;
2528 fwd_streams[i]->tx_queue = i;
2529 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2530 fwd_streams[i]->retry_enabled = retry_enabled;
2531 }
2532 }
2533 #endif
2534
2535 void
2536 fwd_config_setup(void)
2537 {
2538 cur_fwd_config.fwd_eng = cur_fwd_eng;
2539 if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2540 icmp_echo_config_setup();
2541 return;
2542 }
2543
2544 #if defined RTE_LIBRTE_PMD_SOFTNIC
2545 if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
2546 softnic_fwd_config_setup();
2547 return;
2548 }
2549 #endif
2550
2551 if ((nb_rxq > 1) && (nb_txq > 1)) {
2552 if (dcb_config)
2553 dcb_fwd_config_setup();
2554 else
2555 rss_fwd_config_setup();
2556 }
2557 else
2558 simple_fwd_config_setup();
2559 }
2560
2561 static const char *
2562 mp_alloc_to_str(uint8_t mode)
2563 {
2564 switch (mode) {
2565 case MP_ALLOC_NATIVE:
2566 return "native";
2567 case MP_ALLOC_ANON:
2568 return "anon";
2569 case MP_ALLOC_XMEM:
2570 return "xmem";
2571 case MP_ALLOC_XMEM_HUGE:
2572 return "xmemhuge";
2573 case MP_ALLOC_XBUF:
2574 return "xbuf";
2575 default:
2576 return "invalid";
2577 }
2578 }
2579
2580 void
2581 pkt_fwd_config_display(struct fwd_config *cfg)
2582 {
2583 struct fwd_stream *fs;
2584 lcoreid_t lc_id;
2585 streamid_t sm_id;
2586
2587 printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2588 "NUMA support %s, MP allocation mode: %s\n",
2589 cfg->fwd_eng->fwd_mode_name,
2590 retry_enabled == 0 ? "" : " with retry",
2591 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2592 numa_support == 1 ? "enabled" : "disabled",
2593 mp_alloc_to_str(mp_alloc_type));
2594
2595 if (retry_enabled)
2596 printf("TX retry num: %u, delay between TX retries: %uus\n",
2597 burst_tx_retry_num, burst_tx_delay_time);
2598 for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2599 printf("Logical Core %u (socket %u) forwards packets on "
2600 "%d streams:",
2601 fwd_lcores_cpuids[lc_id],
2602 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2603 fwd_lcores[lc_id]->stream_nb);
2604 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2605 fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2606 printf("\n RX P=%d/Q=%d (socket %u) -> TX "
2607 "P=%d/Q=%d (socket %u) ",
2608 fs->rx_port, fs->rx_queue,
2609 ports[fs->rx_port].socket_id,
2610 fs->tx_port, fs->tx_queue,
2611 ports[fs->tx_port].socket_id);
2612 print_ethaddr("peer=",
2613 &peer_eth_addrs[fs->peer_addr]);
2614 }
2615 printf("\n");
2616 }
2617 printf("\n");
2618 }
2619
2620 void
2621 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2622 {
2623 struct rte_ether_addr new_peer_addr;
2624 if (!rte_eth_dev_is_valid_port(port_id)) {
2625 printf("Error: Invalid port number %i\n", port_id);
2626 return;
2627 }
2628 if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
2629 printf("Error: Invalid ethernet address: %s\n", peer_addr);
2630 return;
2631 }
2632 peer_eth_addrs[port_id] = new_peer_addr;
2633 }
2634
2635 int
2636 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2637 {
2638 unsigned int i;
2639 unsigned int lcore_cpuid;
2640 int record_now;
2641
2642 record_now = 0;
2643 again:
2644 for (i = 0; i < nb_lc; i++) {
2645 lcore_cpuid = lcorelist[i];
2646 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2647 printf("lcore %u not enabled\n", lcore_cpuid);
2648 return -1;
2649 }
2650 if (lcore_cpuid == rte_get_master_lcore()) {
2651 printf("lcore %u cannot be masked on for running "
2652 "packet forwarding, which is the master lcore "
2653 "and reserved for command line parsing only\n",
2654 lcore_cpuid);
2655 return -1;
2656 }
2657 if (record_now)
2658 fwd_lcores_cpuids[i] = lcore_cpuid;
2659 }
2660 if (record_now == 0) {
2661 record_now = 1;
2662 goto again;
2663 }
2664 nb_cfg_lcores = (lcoreid_t) nb_lc;
2665 if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2666 printf("previous number of forwarding cores %u - changed to "
2667 "number of configured cores %u\n",
2668 (unsigned int) nb_fwd_lcores, nb_lc);
2669 nb_fwd_lcores = (lcoreid_t) nb_lc;
2670 }
2671
2672 return 0;
2673 }
2674
2675 int
2676 set_fwd_lcores_mask(uint64_t lcoremask)
2677 {
2678 unsigned int lcorelist[64];
2679 unsigned int nb_lc;
2680 unsigned int i;
2681
2682 if (lcoremask == 0) {
2683 printf("Invalid NULL mask of cores\n");
2684 return -1;
2685 }
2686 nb_lc = 0;
2687 for (i = 0; i < 64; i++) {
2688 if (! ((uint64_t)(1ULL << i) & lcoremask))
2689 continue;
2690 lcorelist[nb_lc++] = i;
2691 }
2692 return set_fwd_lcores_list(lcorelist, nb_lc);
2693 }
2694
2695 void
2696 set_fwd_lcores_number(uint16_t nb_lc)
2697 {
2698 if (nb_lc > nb_cfg_lcores) {
2699 printf("nb fwd cores %u > %u (max. number of configured "
2700 "lcores) - ignored\n",
2701 (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2702 return;
2703 }
2704 nb_fwd_lcores = (lcoreid_t) nb_lc;
2705 printf("Number of forwarding cores set to %u\n",
2706 (unsigned int) nb_fwd_lcores);
2707 }
2708
2709 void
2710 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2711 {
2712 unsigned int i;
2713 portid_t port_id;
2714 int record_now;
2715
2716 record_now = 0;
2717 again:
2718 for (i = 0; i < nb_pt; i++) {
2719 port_id = (portid_t) portlist[i];
2720 if (port_id_is_invalid(port_id, ENABLED_WARN))
2721 return;
2722 if (record_now)
2723 fwd_ports_ids[i] = port_id;
2724 }
2725 if (record_now == 0) {
2726 record_now = 1;
2727 goto again;
2728 }
2729 nb_cfg_ports = (portid_t) nb_pt;
2730 if (nb_fwd_ports != (portid_t) nb_pt) {
2731 printf("previous number of forwarding ports %u - changed to "
2732 "number of configured ports %u\n",
2733 (unsigned int) nb_fwd_ports, nb_pt);
2734 nb_fwd_ports = (portid_t) nb_pt;
2735 }
2736 }
2737
2738 /**
2739 * Parse the user input and obtain the list of forwarding ports
2740 *
2741 * @param[in] list
2742 * String containing the user input. User can specify
2743 * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
2744 * For example, if the user wants to use all 4 available
2745 * ports in the system, the input can be 0-3 or 0,1,2,3.
2746 * If the user wants to use only ports 1 and 2, the input
2747 * is 1,2.
2748 * Valid separator characters are '-' and ','.
2749 * @param[out] values
2750 * This array will be filled with a list of port IDs
2751 * based on the user input
2752 * Note that duplicate entries are discarded and only the first
2753 * count entries in this array are port IDs and all the rest
2754 * will contain default values
2755 * @param[in] maxsize
2756 * This parameter denotes 2 things
2757 * 1) Number of elements in the values array
2758 * 2) Maximum value of each element in the values array
2759 * @return
2760 * On success, returns total count of parsed port IDs
2761 * On failure, returns 0
2762 */
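/*
 * A '-' opens a range; min == INT_MAX marks that no range is in
 * progress. Duplicate port IDs are dropped via the marked[] array.
 */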
2763 static unsigned int
2764 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
2765 {
2766 unsigned int count = 0;
2767 char *end = NULL;
2768 int min, max;
2769 int value, i;
2770 unsigned int marked[maxsize];
2771
2772 if (list == NULL || values == NULL)
2773 return 0;
2774
2775 for (i = 0; i < (int)maxsize; i++)
2776 marked[i] = 0;
2777
2778 min = INT_MAX;
2779
2780 do {
2781 /* Remove blank spaces, if any */
2782 while (isblank(*list))
2783 list++;
2784 if (*list == '\0')
2785 break;
2786 errno = 0;
2787 value = strtol(list, &end, 10);
2788 if (errno || end == NULL)
2789 return 0;
2790 if (value < 0 || value >= (int)maxsize)
2791 return 0;
2792 while (isblank(*end))
2793 end++;
2794 if (*end == '-' && min == INT_MAX) {
2795 min = value;
2796 } else if ((*end == ',') || (*end == '\0')) {
2797 max = value;
2798 if (min == INT_MAX)
2799 min = value;
2800 for (i = min; i <= max; i++) {
2801 if (count < maxsize) {
2802 if (marked[i])
2803 continue;
2804 values[count] = i;
2805 marked[i] = 1;
2806 count++;
2807 }
2808 }
2809 min = INT_MAX;
2810 } else
2811 return 0;
2812 list = end + 1;
2813 } while (*end != '\0');
2814
2815 return count;
2816 }
2817
2818 void
2819 parse_fwd_portlist(const char *portlist)
2820 {
2821 unsigned int portcount;
2822 unsigned int portindex[RTE_MAX_ETHPORTS];
2823 unsigned int i, valid_port_count = 0;
2824
2825 portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
2826 if (!portcount)
2827 rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
2828
2829 /*
2830 * Here we verify the validity of the ports
2831 * and thereby calculate the total number of
2832 * valid ports
2833 */
2834 for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
2835 if (rte_eth_dev_is_valid_port(portindex[i])) {
2836 portindex[valid_port_count] = portindex[i];
2837 valid_port_count++;
2838 }
2839 }
2840
2841 set_fwd_ports_list(portindex, valid_port_count);
2842 }
2843
2844 void
2845 set_fwd_ports_mask(uint64_t portmask)
2846 {
2847 unsigned int portlist[64];
2848 unsigned int nb_pt;
2849 unsigned int i;
2850
2851 if (portmask == 0) {
2852 printf("Invalid NULL mask of ports\n");
2853 return;
2854 }
2855 nb_pt = 0;
2856 RTE_ETH_FOREACH_DEV(i) {
2857 if (! ((uint64_t)(1ULL << i) & portmask))
2858 continue;
2859 portlist[nb_pt++] = i;
2860 }
2861 set_fwd_ports_list(portlist, nb_pt);
2862 }
2863
2864 void
2865 set_fwd_ports_number(uint16_t nb_pt)
2866 {
2867 if (nb_pt > nb_cfg_ports) {
2868 printf("nb fwd ports %u > %u (number of configured "
2869 "ports) - ignored\n",
2870 (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2871 return;
2872 }
2873 nb_fwd_ports = (portid_t) nb_pt;
2874 printf("Number of forwarding ports set to %u\n",
2875 (unsigned int) nb_fwd_ports);
2876 }
2877
2878 int
2879 port_is_forwarding(portid_t port_id)
2880 {
2881 unsigned int i;
2882
2883 if (port_id_is_invalid(port_id, ENABLED_WARN))
2884 return -1;
2885
2886 for (i = 0; i < nb_fwd_ports; i++) {
2887 if (fwd_ports_ids[i] == port_id)
2888 return 1;
2889 }
2890
2891 return 0;
2892 }
2893
2894 void
2895 set_nb_pkt_per_burst(uint16_t nb)
2896 {
2897 if (nb > MAX_PKT_BURST) {
2898 printf("nb pkt per burst: %u > %u (maximum packets per burst)"
2899 " - ignored\n",
2900 (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2901 return;
2902 }
2903 nb_pkt_per_burst = nb;
2904 printf("Number of packets per burst set to %u\n",
2905 (unsigned int) nb_pkt_per_burst);
2906 }
2907
2908 static const char *
2909 tx_split_get_name(enum tx_pkt_split split)
2910 {
2911 uint32_t i;
2912
2913 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2914 if (tx_split_name[i].split == split)
2915 return tx_split_name[i].name;
2916 }
2917 return NULL;
2918 }
2919
2920 void
2921 set_tx_pkt_split(const char *name)
2922 {
2923 uint32_t i;
2924
2925 for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2926 if (strcmp(tx_split_name[i].name, name) == 0) {
2927 tx_pkt_split = tx_split_name[i].split;
2928 return;
2929 }
2930 }
2931 printf("unknown value: \"%s\"\n", name);
2932 }
2933
2934 void
2935 show_tx_pkt_segments(void)
2936 {
2937 uint32_t i, n;
2938 const char *split;
2939
2940 n = tx_pkt_nb_segs;
2941 split = tx_split_get_name(tx_pkt_split);
2942
2943 printf("Number of segments: %u\n", n);
2944 printf("Segment sizes: ");
2945 for (i = 0; i != n - 1; i++)
2946 printf("%hu,", tx_pkt_seg_lengths[i]);
2947 printf("%hu\n", tx_pkt_seg_lengths[i]);
2948 printf("Split packet: %s\n", split);
2949 }
2950
2951 void
2952 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2953 {
2954 uint16_t tx_pkt_len;
2955 unsigned i;
2956
2957 if (nb_segs >= (unsigned) nb_txd) {
2958 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
2959 nb_segs, (unsigned int) nb_txd);
2960 return;
2961 }
2962
2963 /*
2964 * Check that each segment length is less than or equal to
2965 * the mbuf data size.
2966 * Check also that the total packet length is greater than or equal to the
2967 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
2968 * 20 + 8).
2969 */
2970 tx_pkt_len = 0;
2971 for (i = 0; i < nb_segs; i++) {
2972 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2973 printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2974 i, seg_lengths[i], (unsigned) mbuf_data_size);
2975 return;
2976 }
2977 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2978 }
2979 if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
2980 printf("total packet length=%u < %d - give up\n",
2981 (unsigned) tx_pkt_len,
2982 (int)(sizeof(struct rte_ether_hdr) + 20 + 8));
2983 return;
2984 }
2985
2986 for (i = 0; i < nb_segs; i++)
2987 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2988
2989 tx_pkt_length = tx_pkt_len;
2990 tx_pkt_nb_segs = (uint8_t) nb_segs;
2991 }
2992
2993 void
2994 setup_gro(const char *onoff, portid_t port_id)
2995 {
2996 if (!rte_eth_dev_is_valid_port(port_id)) {
2997 printf("invalid port id %u\n", port_id);
2998 return;
2999 }
3000 if (test_done == 0) {
3001 printf("Before enabling/disabling GRO,"
3002 " please stop forwarding first\n");
3003 return;
3004 }
3005 if (strcmp(onoff, "on") == 0) {
3006 if (gro_ports[port_id].enable != 0) {
3007 printf("GRO is already enabled on port %u. Please"
3008 " disable it first\n", port_id);
3009 return;
3010 }
3011 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3012 gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3013 gro_ports[port_id].param.max_flow_num =
3014 GRO_DEFAULT_FLOW_NUM;
3015 gro_ports[port_id].param.max_item_per_flow =
3016 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3017 }
3018 gro_ports[port_id].enable = 1;
3019 } else {
3020 if (gro_ports[port_id].enable == 0) {
3021 printf("GRO is not enabled on port %u\n", port_id);
3022 return;
3023 }
3024 gro_ports[port_id].enable = 0;
3025 }
3026 }
3027
3028 void
3029 setup_gro_flush_cycles(uint8_t cycles)
3030 {
3031 if (test_done == 0) {
3032 printf("Before changing the GRO flush interval,"
3033 " please stop forwarding first.\n");
3034 return;
3035 }
3036
3037 if (cycles > GRO_MAX_FLUSH_CYCLES ||
3038 cycles < GRO_DEFAULT_FLUSH_CYCLES) {
3039 printf("The flushing cycle must be in the range"
3040 " of 1 to %u. Reverting to the default"
3041 " value %u.\n",
3042 GRO_MAX_FLUSH_CYCLES,
3043 GRO_DEFAULT_FLUSH_CYCLES);
3044 cycles = GRO_DEFAULT_FLUSH_CYCLES;
3045 }
3046
3047 gro_flush_cycles = cycles;
3048 }
3049
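/*
 * Display the GRO configuration of a port. With the default flush
 * cycle the packet budget is max_flow_num * max_item_per_flow;
 * otherwise it is MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES.
 */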
3050 void
3051 show_gro(portid_t port_id)
3052 {
3053 struct rte_gro_param *param;
3054 uint32_t max_pkts_num;
3055
3056 if (!rte_eth_dev_is_valid_port(port_id)) {
3057 printf("Invalid port id %u.\n", port_id);
3058 return;
3059 }
3060
3061 param = &gro_ports[port_id].param;
3062 if (gro_ports[port_id].enable) {
3063 printf("GRO type: TCP/IPv4\n");
3064 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3065 max_pkts_num = param->max_flow_num *
3066 param->max_item_per_flow;
3067 } else
3068 max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3069 printf("Max number of packets to perform GRO: %u\n",
3070 max_pkts_num);
3071 printf("Flushing cycles: %u\n", gro_flush_cycles);
3072 } else
3073 printf("GRO is not enabled on port %u.\n", port_id);
3074 }
3075
3076 void
3077 setup_gso(const char *mode, portid_t port_id)
3078 {
3079 if (!rte_eth_dev_is_valid_port(port_id)) {
3080 printf("invalid port id %u\n", port_id);
3081 return;
3082 }
3083 if (strcmp(mode, "on") == 0) {
3084 if (test_done == 0) {
3085 printf("before enabling GSO,"
3086 " please stop forwarding first\n");
3087 return;
3088 }
3089 gso_ports[port_id].enable = 1;
3090 } else if (strcmp(mode, "off") == 0) {
3091 if (test_done == 0) {
3092 printf("before disabling GSO,"
3093 " please stop forwarding first\n");
3094 return;
3095 }
3096 gso_ports[port_id].enable = 0;
3097 }
3098 }
3099
3100 char*
3101 list_pkt_forwarding_modes(void)
3102 {
3103 static char fwd_modes[128] = "";
3104 const char *separator = "|";
3105 struct fwd_engine *fwd_eng;
3106 unsigned i = 0;
3107
3108 if (strlen(fwd_modes) == 0) {
3109 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3110 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3111 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3112 strncat(fwd_modes, separator,
3113 sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3114 }
3115 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3116 }
3117
3118 return fwd_modes;
3119 }
3120
3121 char*
3122 list_pkt_forwarding_retry_modes(void)
3123 {
3124 static char fwd_modes[128] = "";
3125 const char *separator = "|";
3126 struct fwd_engine *fwd_eng;
3127 unsigned i = 0;
3128
3129 if (strlen(fwd_modes) == 0) {
3130 while ((fwd_eng = fwd_engines[i++]) != NULL) {
3131 if (fwd_eng == &rx_only_engine)
3132 continue;
3133 strncat(fwd_modes, fwd_eng->fwd_mode_name,
3134 sizeof(fwd_modes) -
3135 strlen(fwd_modes) - 1);
3136 strncat(fwd_modes, separator,
3137 sizeof(fwd_modes) -
3138 strlen(fwd_modes) - 1);
3139 }
3140 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3141 }
3142
3143 return fwd_modes;
3144 }
3145
3146 void
3147 set_pkt_forwarding_mode(const char *fwd_mode_name)
3148 {
3149 struct fwd_engine *fwd_eng;
3150 unsigned i;
3151
3152 i = 0;
3153 while ((fwd_eng = fwd_engines[i]) != NULL) {
3154 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
3155 printf("Set %s packet forwarding mode%s\n",
3156 fwd_mode_name,
3157 retry_enabled == 0 ? "" : " with retry");
3158 cur_fwd_eng = fwd_eng;
3159 return;
3160 }
3161 i++;
3162 }
3163 printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
3164 }
3165
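/*
 * The add/remove helpers below install or tear down the per-queue
 * dump_rx_pkts/dump_tx_pkts callbacks used by the verbose modes to
 * print packets as they pass through each queue.
 */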
3166 void
3167 add_rx_dump_callbacks(portid_t portid)
3168 {
3169 struct rte_eth_dev_info dev_info;
3170 uint16_t queue;
3171 int ret;
3172
3173 if (port_id_is_invalid(portid, ENABLED_WARN))
3174 return;
3175
3176 ret = eth_dev_info_get_print_err(portid, &dev_info);
3177 if (ret != 0)
3178 return;
3179
3180 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3181 if (!ports[portid].rx_dump_cb[queue])
3182 ports[portid].rx_dump_cb[queue] =
3183 rte_eth_add_rx_callback(portid, queue,
3184 dump_rx_pkts, NULL);
3185 }
3186
3187 void
3188 add_tx_dump_callbacks(portid_t portid)
3189 {
3190 struct rte_eth_dev_info dev_info;
3191 uint16_t queue;
3192 int ret;
3193
3194 if (port_id_is_invalid(portid, ENABLED_WARN))
3195 return;
3196
3197 ret = eth_dev_info_get_print_err(portid, &dev_info);
3198 if (ret != 0)
3199 return;
3200
3201 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3202 if (!ports[portid].tx_dump_cb[queue])
3203 ports[portid].tx_dump_cb[queue] =
3204 rte_eth_add_tx_callback(portid, queue,
3205 dump_tx_pkts, NULL);
3206 }
3207
3208 void
3209 remove_rx_dump_callbacks(portid_t portid)
3210 {
3211 struct rte_eth_dev_info dev_info;
3212 uint16_t queue;
3213 int ret;
3214
3215 if (port_id_is_invalid(portid, ENABLED_WARN))
3216 return;
3217
3218 ret = eth_dev_info_get_print_err(portid, &dev_info);
3219 if (ret != 0)
3220 return;
3221
3222 for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3223 if (ports[portid].rx_dump_cb[queue]) {
3224 rte_eth_remove_rx_callback(portid, queue,
3225 ports[portid].rx_dump_cb[queue]);
3226 ports[portid].rx_dump_cb[queue] = NULL;
3227 }
3228 }
3229
3230 void
3231 remove_tx_dump_callbacks(portid_t portid)
3232 {
3233 struct rte_eth_dev_info dev_info;
3234 uint16_t queue;
3235 int ret;
3236
3237 if (port_id_is_invalid(portid, ENABLED_WARN))
3238 return;
3239
3240 ret = eth_dev_info_get_print_err(portid, &dev_info);
3241 if (ret != 0)
3242 return;
3243
3244 for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3245 if (ports[portid].tx_dump_cb[queue]) {
3246 rte_eth_remove_tx_callback(portid, queue,
3247 ports[portid].tx_dump_cb[queue]);
3248 ports[portid].tx_dump_cb[queue] = NULL;
3249 }
3250 }
3251
3252 void
3253 configure_rxtx_dump_callbacks(uint16_t verbose)
3254 {
3255 portid_t portid;
3256
3257 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3258 TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3259 return;
3260 #endif
3261
3262 RTE_ETH_FOREACH_DEV(portid)
3263 {
3264 if (verbose == 1 || verbose > 2)
3265 add_rx_dump_callbacks(portid);
3266 else
3267 remove_rx_dump_callbacks(portid);
3268 if (verbose >= 2)
3269 add_tx_dump_callbacks(portid);
3270 else
3271 remove_tx_dump_callbacks(portid);
3272 }
3273 }
3274
3275 void
3276 set_verbose_level(uint16_t vb_level)
3277 {
3278 printf("Change verbose level from %u to %u\n",
3279 (unsigned int) verbose_level, (unsigned int) vb_level);
3280 verbose_level = vb_level;
3281 configure_rxtx_dump_callbacks(verbose_level);
3282 }
3283
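/*
 * The VLAN offload helpers below toggle one offload flag through
 * rte_eth_dev_set_vlan_offload() and mirror the change into the
 * port's cached rxmode.offloads so that later reconfigurations
 * preserve it.
 */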
3284 void
3285 vlan_extend_set(portid_t port_id, int on)
3286 {
3287 int diag;
3288 int vlan_offload;
3289 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3290
3291 if (port_id_is_invalid(port_id, ENABLED_WARN))
3292 return;
3293
3294 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3295
3296 if (on) {
3297 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3298 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3299 } else {
3300 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3301 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3302 }
3303
3304 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3305 if (diag < 0)
3306 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
3307 "diag=%d\n", port_id, on, diag);
3308 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3309 }
3310
3311 void
3312 rx_vlan_strip_set(portid_t port_id, int on)
3313 {
3314 int diag;
3315 int vlan_offload;
3316 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3317
3318 if (port_id_is_invalid(port_id, ENABLED_WARN))
3319 return;
3320
3321 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3322
3323 if (on) {
3324 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3325 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3326 } else {
3327 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3328 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3329 }
3330
3331 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3332 if (diag < 0)
3333 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
3334 "diag=%d\n", port_id, on, diag);
3335 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3336 }
3337
3338 void
3339 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3340 {
3341 int diag;
3342
3343 if (port_id_is_invalid(port_id, ENABLED_WARN))
3344 return;
3345
3346 diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3347 if (diag < 0)
3348 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
3349 "diag=%d\n", port_id, queue_id, on, diag);
3350 }
3351
3352 void
3353 rx_vlan_filter_set(portid_t port_id, int on)
3354 {
3355 int diag;
3356 int vlan_offload;
3357 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3358
3359 if (port_id_is_invalid(port_id, ENABLED_WARN))
3360 return;
3361
3362 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3363
3364 if (on) {
3365 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
3366 port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3367 } else {
3368 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
3369 port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3370 }
3371
3372 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3373 if (diag < 0)
3374 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
3375 "diag=%d\n", port_id, on, diag);
3376 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3377 }
3378
3379 void
3380 rx_vlan_qinq_strip_set(portid_t port_id, int on)
3381 {
3382 int diag;
3383 int vlan_offload;
3384 uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3385
3386 if (port_id_is_invalid(port_id, ENABLED_WARN))
3387 return;
3388
3389 vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3390
3391 if (on) {
3392 vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
3393 port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3394 } else {
3395 vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
3396 port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3397 }
3398
3399 diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3400 if (diag < 0)
3401 printf("%s(port_id=%d, on=%d) failed "
3402 "diag=%d\n", __func__, port_id, on, diag);
3403 ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3404 }
3405
3406 int
3407 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
3408 {
3409 int diag;
3410
3411 if (port_id_is_invalid(port_id, ENABLED_WARN))
3412 return 1;
3413 if (vlan_id_is_invalid(vlan_id))
3414 return 1;
3415 diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
3416 if (diag == 0)
3417 return 0;
3418 printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
3419 "diag=%d\n",
3420 port_id, vlan_id, on, diag);
3421 return -1;
3422 }
3423
3424 void
3425 rx_vlan_all_filter_set(portid_t port_id, int on)
3426 {
3427 uint16_t vlan_id;
3428
3429 if (port_id_is_invalid(port_id, ENABLED_WARN))
3430 return;
3431 for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
3432 if (rx_vft_set(port_id, vlan_id, on))
3433 break;
3434 }
3435 }
3436
3437 void
3438 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
3439 {
3440 int diag;
3441
3442 if (port_id_is_invalid(port_id, ENABLED_WARN))
3443 return;
3444
3445 diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
3446 if (diag == 0)
3447 return;
3448
3449 printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
3450 "diag=%d\n",
3451 port_id, vlan_type, tp_id, diag);
3452 }
3453
3454 void
3455 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
3456 {
3457 struct rte_eth_dev_info dev_info;
3458 int ret;
3459
3460 if (port_id_is_invalid(port_id, ENABLED_WARN))
3461 return;
3462 if (vlan_id_is_invalid(vlan_id))
3463 return;
3464
3465 if (ports[port_id].dev_conf.txmode.offloads &
3466 DEV_TX_OFFLOAD_QINQ_INSERT) {
3467 printf("Error: cannot set VLAN insert while QinQ insert is enabled.\n");
3468 return;
3469 }
3470
3471 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3472 if (ret != 0)
3473 return;
3474
3475 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
3476 printf("Error: vlan insert is not supported by port %d\n",
3477 port_id);
3478 return;
3479 }
3480
3481 tx_vlan_reset(port_id);
3482 ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
3483 ports[port_id].tx_vlan_id = vlan_id;
3484 }
3485
3486 void
3487 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
3488 {
3489 struct rte_eth_dev_info dev_info;
3490 int ret;
3491
3492 if (port_id_is_invalid(port_id, ENABLED_WARN))
3493 return;
3494 if (vlan_id_is_invalid(vlan_id))
3495 return;
3496 if (vlan_id_is_invalid(vlan_id_outer))
3497 return;
3498
3499 ret = eth_dev_info_get_print_err(port_id, &dev_info);
3500 if (ret != 0)
3501 return;
3502
3503 if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
3504 printf("Error: qinq insert not supported by port %d\n",
3505 port_id);
3506 return;
3507 }
3508
3509 tx_vlan_reset(port_id);
3510 ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
3511 DEV_TX_OFFLOAD_QINQ_INSERT);
3512 ports[port_id].tx_vlan_id = vlan_id;
3513 ports[port_id].tx_vlan_id_outer = vlan_id_outer;
3514 }
3515
3516 void
3517 tx_vlan_reset(portid_t port_id)
3518 {
3519 if (port_id_is_invalid(port_id, ENABLED_WARN))
3520 return;
3521 ports[port_id].dev_conf.txmode.offloads &=
3522 ~(DEV_TX_OFFLOAD_VLAN_INSERT |
3523 DEV_TX_OFFLOAD_QINQ_INSERT);
3524 ports[port_id].tx_vlan_id = 0;
3525 ports[port_id].tx_vlan_id_outer = 0;
3526 }
3527
3528 void
3529 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
3530 {
3531 if (port_id_is_invalid(port_id, ENABLED_WARN))
3532 return;
3533
3534 rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
3535 }
3536
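/*
 * Record a queue-to-statistics-counter mapping in the RX or TX
 * mapping table: update the entry for an already mapped port/queue
 * pair, or append a new entry otherwise.
 */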
3537 void
3538 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
3539 {
3540 uint16_t i;
3541 uint8_t existing_mapping_found = 0;
3542
3543 if (port_id_is_invalid(port_id, ENABLED_WARN))
3544 return;
3545
3546 if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
3547 return;
3548
3549 if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
3550 printf("map_value not in required range 0..%d\n",
3551 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
3552 return;
3553 }
3554
3555 if (!is_rx) { /* then TX */
3556 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
3557 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
3558 (tx_queue_stats_mappings[i].queue_id == queue_id)) {
3559 tx_queue_stats_mappings[i].stats_counter_id = map_value;
3560 existing_mapping_found = 1;
3561 break;
3562 }
3563 }
3564 if (!existing_mapping_found) { /* A new additional mapping... */
3565 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
3566 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
3567 tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
3568 nb_tx_queue_stats_mappings++;
3569 }
3570 }
3571 else { /* RX */
3572 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
3573 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
3574 (rx_queue_stats_mappings[i].queue_id == queue_id)) {
3575 rx_queue_stats_mappings[i].stats_counter_id = map_value;
3576 existing_mapping_found = 1;
3577 break;
3578 }
3579 }
3580 if (!existing_mapping_found) { /* A new additional mapping... */
3581 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
3582 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
3583 rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
3584 nb_rx_queue_stats_mappings++;
3585 }
3586 }
3587 }
3588
3589 void
3590 set_xstats_hide_zero(uint8_t on_off)
3591 {
3592 xstats_hide_zero = on_off;
3593 }
3594
3595 static inline void
3596 print_fdir_mask(struct rte_eth_fdir_masks *mask)
3597 {
3598 printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
3599
3600 if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3601 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
3602 " tunnel_id: 0x%08x",
3603 mask->mac_addr_byte_mask, mask->tunnel_type_mask,
3604 rte_be_to_cpu_32(mask->tunnel_id_mask));
3605 else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3606 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
3607 rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
3608 rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
3609
3610 printf("\n src_port: 0x%04x, dst_port: 0x%04x",
3611 rte_be_to_cpu_16(mask->src_port_mask),
3612 rte_be_to_cpu_16(mask->dst_port_mask));
3613
3614 printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3615 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
3616 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
3617 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
3618 rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
3619
3620 printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
3621 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
3622 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
3623 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
3624 rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
3625 }
3626
3627 printf("\n");
3628 }
3629
3630 static inline void
3631 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3632 {
3633 struct rte_eth_flex_payload_cfg *cfg;
3634 uint32_t i, j;
3635
3636 for (i = 0; i < flex_conf->nb_payloads; i++) {
3637 cfg = &flex_conf->flex_set[i];
3638 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
3639 printf("\n RAW: ");
3640 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
3641 printf("\n L2_PAYLOAD: ");
3642 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
3643 printf("\n L3_PAYLOAD: ");
3644 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
3645 printf("\n L4_PAYLOAD: ");
3646 else
3647 printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type);
3648 for (j = 0; j < num; j++)
3649 printf(" %-5u", cfg->src_offset[j]);
3650 }
3651 printf("\n");
3652 }
3653
3654 static char *
3655 flowtype_to_str(uint16_t flow_type)
3656 {
3657 struct flow_type_info {
3658 char str[32];
3659 uint16_t ftype;
3660 };
3661
3662 uint8_t i;
3663 static struct flow_type_info flowtype_str_table[] = {
3664 {"raw", RTE_ETH_FLOW_RAW},
3665 {"ipv4", RTE_ETH_FLOW_IPV4},
3666 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3667 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3668 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3669 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3670 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3671 {"ipv6", RTE_ETH_FLOW_IPV6},
3672 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3673 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3674 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3675 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3676 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3677 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3678 {"port", RTE_ETH_FLOW_PORT},
3679 {"vxlan", RTE_ETH_FLOW_VXLAN},
3680 {"geneve", RTE_ETH_FLOW_GENEVE},
3681 {"nvgre", RTE_ETH_FLOW_NVGRE},
3682 {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
3683 };
3684
3685 for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3686 if (flowtype_str_table[i].ftype == flow_type)
3687 return flowtype_str_table[i].str;
3688 }
3689
3690 return NULL;
3691 }
3692
3693 static inline void
3694 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3695 {
3696 struct rte_eth_fdir_flex_mask *mask;
3697 uint32_t i, j;
3698 char *p;
3699
3700 for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3701 mask = &flex_conf->flex_mask[i];
3702 p = flowtype_to_str(mask->flow_type);
3703 printf("\n %s:\t", p ? p : "unknown");
3704 for (j = 0; j < num; j++)
3705 printf(" %02x", mask->mask[j]);
3706 }
3707 printf("\n");
3708 }
3709
3710 static inline void
3711 print_fdir_flow_type(uint32_t flow_types_mask)
3712 {
3713 int i;
3714 char *p;
3715
3716 for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3717 if (!(flow_types_mask & (1 << i)))
3718 continue;
3719 p = flowtype_to_str(i);
3720 if (p)
3721 printf(" %s", p);
3722 else
3723 printf(" unknown");
3724 }
3725 printf("\n");
3726 }
3727
3728 void
3729 fdir_get_infos(portid_t port_id)
3730 {
3731 struct rte_eth_fdir_stats fdir_stat;
3732 struct rte_eth_fdir_info fdir_info;
3733 int ret;
3734
3735 static const char *fdir_stats_border = "########################";
3736
3737 if (port_id_is_invalid(port_id, ENABLED_WARN))
3738 return;
3739 ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3740 if (ret < 0) {
3741 printf("\n FDIR is not supported on port %-2d\n",
3742 port_id);
3743 return;
3744 }
3745
3746 memset(&fdir_info, 0, sizeof(fdir_info));
3747 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3748 RTE_ETH_FILTER_INFO, &fdir_info);
3749 memset(&fdir_stat, 0, sizeof(fdir_stat));
3750 rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3751 RTE_ETH_FILTER_STATS, &fdir_stat);
3752 printf("\n %s FDIR infos for port %-2d %s\n",
3753 fdir_stats_border, port_id, fdir_stats_border);
3754 printf(" MODE: ");
3755 if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3756 printf(" PERFECT\n");
3757 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3758 printf(" PERFECT-MAC-VLAN\n");
3759 else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3760 printf(" PERFECT-TUNNEL\n");
3761 else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3762 printf(" SIGNATURE\n");
3763 else
3764 printf(" DISABLED\n");
3765 if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3766 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3767 printf(" SUPPORTED FLOW TYPE: ");
3768 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3769 }
3770 printf(" FLEX PAYLOAD INFO:\n");
3771 printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
3772 " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
3773 " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
3774 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3775 fdir_info.flex_payload_unit,
3776 fdir_info.max_flex_payload_segment_num,
3777 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3778 printf(" MASK: ");
3779 print_fdir_mask(&fdir_info.mask);
3780 if (fdir_info.flex_conf.nb_payloads > 0) {
3781 printf(" FLEX PAYLOAD SRC OFFSET:");
3782 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3783 }
3784 if (fdir_info.flex_conf.nb_flexmasks > 0) {
3785 printf(" FLEX MASK CFG:");
3786 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3787 }
3788 printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
3789 fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3790 printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
3791 fdir_info.guarant_spc, fdir_info.best_spc);
3792 printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
3793 " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
3794 " add: %-10"PRIu64" remove: %"PRIu64"\n"
3795 " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
3796 fdir_stat.collision, fdir_stat.free,
3797 fdir_stat.maxhash, fdir_stat.maxlen,
3798 fdir_stat.add, fdir_stat.remove,
3799 fdir_stat.f_add, fdir_stat.f_remove);
3800 printf(" %s############################%s\n",
3801 fdir_stats_border, fdir_stats_border);
3802 }
3803
3804 void
3805 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3806 {
3807 struct rte_port *port;
3808 struct rte_eth_fdir_flex_conf *flex_conf;
3809 int i, idx = 0;
3810
3811 port = &ports[port_id];
3812 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3813 for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3814 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3815 idx = i;
3816 break;
3817 }
3818 }
3819 if (i >= RTE_ETH_FLOW_MAX) {
3820 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3821 idx = flex_conf->nb_flexmasks;
3822 flex_conf->nb_flexmasks++;
3823 } else {
3824 printf("The flex mask table is full. Cannot set flex"
3825 " mask for flow_type(%u).\n", cfg->flow_type);
3826 return;
3827 }
3828 }
3829 rte_memcpy(&flex_conf->flex_mask[idx],
3830 cfg,
3831 sizeof(struct rte_eth_fdir_flex_mask));
3832 }
3833
3834 void
3835 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3836 {
3837 struct rte_port *port;
3838 struct rte_eth_fdir_flex_conf *flex_conf;
3839 int i, idx = 0;
3840
3841 port = &ports[port_id];
3842 flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3843 for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3844 if (cfg->type == flex_conf->flex_set[i].type) {
3845 idx = i;
3846 break;
3847 }
3848 }
3849 if (i >= RTE_ETH_PAYLOAD_MAX) {
3850 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3851 idx = flex_conf->nb_payloads;
3852 flex_conf->nb_payloads++;
3853 } else {
3854 printf("The flex payload table is full. Cannot set"
3855 " flex payload for type(%u).\n", cfg->type);
3856 return;
3857 }
3858 }
3859 rte_memcpy(&flex_conf->flex_set[idx],
3860 cfg,
3861 sizeof(struct rte_eth_flex_payload_cfg));
3862
3863 }
3864
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_LIBRTE_IXGBE_PMD
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
	       is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	printf("VF %s setting not supported for port %d\n",
	       is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}
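
/*
 * Apply a Tx rate limit to one queue of a port, after checking that the
 * requested rate does not exceed the current link speed.
 */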
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return 1;
	if (rate > link.link_speed) {
		printf("Invalid rate value: %u, bigger than link speed: %u\n",
		       rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}
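
/*
 * Apply a Tx rate limit to the queues in q_msk of a given VF. Tries the
 * ixgbe and bnxt PMD-specific rate-limit APIs in turn; returns -ENOTSUP
 * when no compiled-in driver handles the request.
 */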
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_LIBRTE_IXGBE_PMD
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
	       port_id, diag);
	return diag;
}

/*
 * Functions to manage the set of filtered multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the addresses recorded in the pool are
 * kept compacted at its beginning. Hence, when an address is removed from
 * the pool, all following addresses, if any, are moved back one slot to
 * keep the set contiguous.
 */
#define MCAST_POOL_INC 32
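
/*
 * Make room for one more address at the end of the pool, reallocating it
 * in MCAST_POOL_INC-sized chunks when the current allocation is full.
 * Returns 0 on success, -ENOMEM when the reallocation fails.
 */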
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct rte_ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
							MCAST_POOL_INC);
	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
						    mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}
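
/*
 * Append a multicast address at the end of the pool, extending the pool
 * first; silently gives up if the extension fails.
 */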
static void
mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
{
	if (mcast_addr_pool_extend(port) != 0)
		return;
	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
}
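
/*
 * Remove the address at index addr_idx from the pool, compacting the
 * remaining entries; the pool itself is freed once it becomes empty.
 */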
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
}
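
/*
 * Push the current multicast address pool of the port down to the device
 * through rte_eth_dev_set_mc_addr_list(), printing a message on failure.
 */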
static int
eth_port_multicast_addr_list_set(portid_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag < 0)
		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
		       port_id, port->mc_addr_nb, diag);

	return diag;
}
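
/*
 * Add a multicast address to the pool of the port, unless it is already
 * recorded, and re-program the device; the pool entry is rolled back if
 * the device rejects the new list.
 */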
void
mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	mcast_addr_pool_append(port, mc_addr);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, remove the address from the pool */
		mcast_addr_pool_remove(port, i);
}
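
/*
 * Remove a multicast address from the pool of the port, if present, and
 * re-program the device; the address is appended back if the device
 * rejects the new list.
 */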
void
mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	if (eth_port_multicast_addr_list_set(port_id) < 0)
		/* Rollback on failure, add the address back into the pool */
		mcast_addr_pool_append(port, mc_addr);
}
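
/*
 * Display the DCB configuration of a port: number of traffic classes,
 * priority-to-TC mapping, per-TC bandwidth shares and Rx/Tx queue ranges.
 */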
void
port_dcb_info_display(portid_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
		       port_id);
		return;
	}
	printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf("\n TC : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}
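
/*
 * Read a whole regular file into a freshly malloc'ed buffer. Returns the
 * buffer (to be released with close_file()) and stores its length in
 * *size, or returns NULL on any error.
 */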
uint8_t *
open_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	ssize_t ret;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return buf;
	}

	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}

	buf = (uint8_t *)malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return buf;
	}

	/* Reject short reads as well as read errors. */
	ret = read(fd, buf, pkg_size);
	if (ret != pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		close_file(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}
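
/*
 * Write size bytes from buf to a file, creating or truncating it.
 * Returns 0 on success, -1 on open or write failure.
 */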
int
save_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	fclose(fh);

	return 0;
}
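
/*
 * Release a buffer returned by open_file(). Returns 0 when a buffer was
 * actually freed, -1 when called with NULL.
 */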
int
close_file(uint8_t *buf)
{
	if (buf) {
		free((void *)buf);
		return 0;
	}

	return -1;
}
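
/*
 * Display the i40e queue regions configured on a port: per-region queue
 * ranges, user priorities and flow types. Only available when the i40e
 * PMD is compiled in.
 */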
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	if (!info->queue_region_number)
		printf("No queue region has been set on this port.");

	printf("\n %s All queue region info for port=%2d %s",
	       queue_region_info_stats_border, port_id,
	       queue_region_info_stats_border);
	printf("\n queue_region_number: %-14u \n",
	       info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n region_id: %-14u queue_number: %-14u "
		       "queue_start_index: %-14u \n",
		       info->region[i].region_id,
		       info->region[i].queue_num,
		       info->region[i].queue_start_index);

		printf(" user_priority_num is %-14u :",
		       info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n flowtype_num is %-14u :",
		       info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}
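
/*
 * List the unicast MAC addresses currently programmed on a port, skipping
 * the unused (all-zero) entries of the device address table.
 */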
void
show_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev_info dev_info;
	struct rte_ether_addr *addr;
	uint32_t i, num_macs = 0;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		num_macs++;
	}

	printf("Number of MAC addresses added: %u\n", num_macs);

	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(addr))
			continue;

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}
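
/*
 * List the multicast MAC addresses recorded in the testpmd pool of a port
 * (see mcast_addr_add()/mcast_addr_remove() above).
 */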
void
show_mcast_macs(portid_t port_id)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	struct rte_port *port;
	uint32_t i;

	port = &ports[port_id];

	printf("Number of multicast MAC addresses added: %u\n", port->mc_addr_nb);

	for (i = 0; i < port->mc_addr_nb; i++) {
		addr = &port->mc_addr_pool[i];

		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
		printf(" %s\n", buf);
	}
}