/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_timer.h>
#include <rte_keepalive.h>
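
/*
 * shm.h (local to this example) wraps the POSIX shared-memory region used to
 * publish per-core keepalive state to an external monitor process (e.g. the
 * ka-agent companion application shipped with this example).
 */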
#include "shm.h"

#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

#define NB_MBUF 8192

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

static unsigned int l2fwd_rx_queue_per_lcore = 1;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	unsigned n_rx_port;
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

struct rte_mempool *l2fwd_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;
	uint64_t rx;
	uint64_t dropped;
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 1
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */
static int64_t check_period = 5; /* default check cycle is 5ms */

/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;

/* Termination signalling */
static int terminate_signal_received;

/* Termination signal handler */
static void handle_sigterm(__rte_unused int value)
{
	terminate_signal_received = 1;
}

/* Print out statistics on packets dropped */
static void
print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
	__attribute__((unused)) void *ptr_data)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	uint16_t portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		/* skip disabled ports */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("\nStatistics for port %u ------------------------------"
			"\nPackets sent: %24"PRIu64
			"\nPackets received: %20"PRIu64
			"\nPackets dropped: %21"PRIu64,
			portid,
			port_statistics[portid].tx,
			port_statistics[portid].rx,
			port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets sent: %18"PRIu64
		"\nTotal packets received: %14"PRIu64
		"\nTotal packets dropped: %15"PRIu64,
		total_packets_tx,
		total_packets_rx,
		total_packets_dropped);
	printf("\n====================================================\n");
}
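
/*
 * Forward a packet received on portid: rewrite the destination MAC to
 * 02:00:00:00:00:<dst_port>, set the source MAC to that of the paired
 * output port and queue the mbuf in the per-port TX buffer.
 */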
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	int sent;
	unsigned dst_port;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

	buffer = tx_buffer[dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}

/* main processing loop */
static void
l2fwd_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	int sent;
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct rte_eth_dev_tx_buffer *buffer;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {

		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}
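
	/*
	 * Record the start time and pick a random lifetime of up to 7 seconds;
	 * once it elapses the loop below exits, simulating a core failure for
	 * the keepalive demo.
	 */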
	uint64_t tsc_initial = rte_rdtsc();
	uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();

	while (!terminate_signal_received) {
		/* Keepalive heartbeat (skipped when keepalive is disabled) */
		if (check_period > 0)
			rte_keepalive_mark_alive(rte_global_keepalive_info);

		cur_tsc = rte_rdtsc();

		/*
		 * Die randomly within 7 secs for demo purposes if
		 * keepalive enabled
		 */
		if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
			break;

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_rx_port; i++) {

				portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
				buffer = tx_buffer[portid];

				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
				if (sent)
					port_statistics[portid].tx += sent;

			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_port; i++) {

			portid = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0,
						 pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_simple_forward(m, portid);
			}
		}
	}
}

static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	l2fwd_main_loop();
	return 0;
}
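
/*
 * Illustrative invocation (EAL core list, channel count and port mask are
 * examples only and must match the local machine):
 *   ./l2fwd-keepalive -l 0-3 -n 4 -- -p 0x3 -q 1 -K 10 -T 5
 */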
/* display usage */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -q NQ: number of queues (=ports) per lcore (default is 1)\n"
		" -K PERIOD: Keepalive check period (5 default; 86400 max)\n"
		" -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
		prgname);
}

static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	if (pm == 0)
		return 0;

	return pm;
}

static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;

	return n;
}

static int
l2fwd_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

static int
l2fwd_parse_check_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:K:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
			if (l2fwd_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
			if (l2fwd_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* timer period */
		case 'T':
			timer_period = l2fwd_parse_timer_period(optarg)
				* (int64_t)(1000 * TIMER_MILLISECOND);
			if (timer_period < 0) {
				printf("invalid timer period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* Check period */
		case 'K':
			check_period = l2fwd_parse_check_period(optarg);
			if (check_period < 0) {
				printf("invalid check period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			l2fwd_usage(prgname);
			return -1;

		default:
			l2fwd_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port %d Link Up. Speed %u Mbps - %s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
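
/*
 * Keepalive failure callback: when a monitored lcore stops issuing
 * heartbeats it is reported here. If the core has genuinely finished,
 * relaunch the forwarding loop on it; otherwise treat the report as a
 * false positive.
 */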
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
	if (terminate_signal_received)
		return;
	printf("Dead core %i - restarting..\n", id_core);
	if (rte_eal_get_lcore_state(id_core) == FINISHED) {
		rte_eal_wait_lcore(id_core);
		rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core);
	} else {
		printf("..false positive!\n");
	}
}
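
/*
 * Keepalive state-change callback: relay each core's state transition and
 * last-seen timestamp into the shared-memory region so an external monitor
 * can observe it.
 */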
static void
relay_core_state(void *ptr_data, const int id_core,
	const enum rte_keepalive_state core_state, uint64_t last_alive)
{
	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
		id_core, core_state, last_alive);
}

int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	int ret;
	uint16_t nb_ports;
	uint16_t nb_ports_available = 0;
	uint16_t portid, last_port;
	unsigned lcore_id, rx_lcore_id;
	unsigned nb_ports_in_mask = 0;
	struct sigaction signal_handler;
	struct rte_keepalive_shm *ka_shm;

	memset(&signal_handler, 0, sizeof(signal_handler));
	terminate_signal_received = 0;
	signal_handler.sa_handler = &handle_sigterm;
	if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
			sigaction(SIGTERM, &signal_handler, NULL) == -1)
		rte_exit(EXIT_FAILURE, "SIGNAL\n");


	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	l2fwd_enabled_port_mask = 0;

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");

	/* create the mbuf pool */
	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			l2fwd_dst_ports[portid] = last_port;
			l2fwd_dst_ports[last_port] = portid;
		} else
			last_port = portid;

		nb_ports_in_mask++;
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		l2fwd_dst_ports[last_port] = last_port;
	}

	rx_lcore_id = 1;
	qconf = NULL;

	/* Initialize the port/queue configuration of each logical core */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       l2fwd_rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n",
			rx_lcore_id, portid);
	}

	/* Initialise each port */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n", portid);
			continue;
		}
		nb_ports_available++;

		/* init port */
		printf("Initializing port %u... ", portid);
		fflush(stdout);
		rte_eth_dev_info_get(portid, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot configure device: err=%d, port=%u\n",
				 ret, portid);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: err=%d, port=%u\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

		/* init one RX queue */
		fflush(stdout);
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     &rxq_conf,
					     l2fwd_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_rx_queue_setup:err=%d, port=%u\n",
				 ret, portid);

		/* init one TX queue on each port */
		fflush(stdout);
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
					     rte_eth_dev_socket_id(portid),
					     &txq_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_tx_queue_setup:err=%d, port=%u\n",
				 ret, portid);

		/* Initialize TX buffers */
		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(portid));
		if (tx_buffer[portid] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
				 portid);

		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
				rte_eth_tx_buffer_count_callback,
				&port_statistics[portid].dropped);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot set error callback for tx buffer on port %u\n",
				 portid);

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_dev_start:err=%d, port=%u\n",
				 ret, portid);

		rte_eth_promiscuous_enable(portid);

		printf("Port %u, MAC address: "
			"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
			portid,
			l2fwd_ports_eth_addr[portid].addr_bytes[0],
			l2fwd_ports_eth_addr[portid].addr_bytes[1],
			l2fwd_ports_eth_addr[portid].addr_bytes[2],
			l2fwd_ports_eth_addr[portid].addr_bytes[3],
			l2fwd_ports_eth_addr[portid].addr_bytes[4],
			l2fwd_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	check_all_ports_link_status(l2fwd_enabled_port_mask);

	struct rte_timer hb_timer, stats_timer;

	rte_timer_subsystem_init();
	rte_timer_init(&stats_timer);
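
	/*
	 * When keepalive checking is enabled, create the shared-memory region
	 * for external monitors, register the dead-core and relay callbacks,
	 * and arm a periodic timer that dispatches keepalive pings every
	 * check_period milliseconds.
	 */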
	ka_shm = NULL;
	if (check_period > 0) {
		ka_shm = rte_keepalive_shm_create();
		if (ka_shm == NULL)
			rte_exit(EXIT_FAILURE,
				"rte_keepalive_shm_create() failed");
		rte_global_keepalive_info =
			rte_keepalive_create(&dead_core, ka_shm);
		if (rte_global_keepalive_info == NULL)
			rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
		rte_keepalive_register_relay_callback(rte_global_keepalive_info,
			relay_core_state, ka_shm);
		rte_timer_init(&hb_timer);
		if (rte_timer_reset(&hb_timer,
				(check_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				(void(*)(struct rte_timer*, void*))
				&rte_keepalive_dispatch_pings,
				rte_global_keepalive_info
				) != 0 )
			rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
	}
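	/*
	 * Arm the periodic statistics printout timer; timer_period is kept in
	 * milliseconds (TIMER_MILLISECOND == 1).
	 */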
	if (timer_period > 0) {
		if (rte_timer_reset(&stats_timer,
				(timer_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				&print_stats, NULL
				) != 0 )
			rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
	}
	/* launch per-lcore init on every slave lcore */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];

		if (qconf->n_rx_port == 0)
			RTE_LOG(INFO, L2FWD,
				"lcore %u has nothing to do\n",
				lcore_id
				);
		else {
			rte_eal_remote_launch(
				l2fwd_launch_one_lcore,
				NULL,
				lcore_id
				);
			if (check_period > 0)
				rte_keepalive_register_core(
					rte_global_keepalive_info,
					lcore_id);
		}
	}
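	/*
	 * Master lcore: service the keepalive and statistics timers until a
	 * termination signal arrives, then wait for the worker lcores and
	 * clean up the keepalive shared memory.
	 */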
	while (!terminate_signal_received) {
		rte_timer_manage();
		rte_delay_ms(5);
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	if (ka_shm != NULL)
		rte_keepalive_shm_cleanup(ka_shm);
	return 0;
}