/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <setjmp.h>
#include <stdarg.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_timer.h>
#include <rte_keepalive.h>

#include "shm.h"

#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

#define NB_MBUF 8192

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

static unsigned int l2fwd_rx_queue_per_lcore = 1;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	unsigned n_rx_port;
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

struct rte_mempool *l2fwd_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;
	uint64_t rx;
	uint64_t dropped;
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 1
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */
static int64_t check_period = 5; /* default check cycle is 5ms */

/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;

/* Termination signalling */
static int terminate_signal_received;

/* Termination signal handler */
static void handle_sigterm(__rte_unused int value)
{
	terminate_signal_received = 1;
}

/* Print out statistics on packets dropped */
static void
print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
	__attribute__((unused)) void *ptr_data)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	unsigned portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		/* skip disabled ports */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;
		printf("\nStatistics for port %u ------------------------------"
			"\nPackets sent: %24"PRIu64
			"\nPackets received: %20"PRIu64
			"\nPackets dropped: %21"PRIu64,
			portid,
			port_statistics[portid].tx,
			port_statistics[portid].rx,
			port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		"\nTotal packets sent: %18"PRIu64
		"\nTotal packets received: %14"PRIu64
		"\nTotal packets dropped: %15"PRIu64,
		total_packets_tx,
		total_packets_rx,
		total_packets_dropped);
	printf("\n====================================================\n");
}

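/* Forward a frame to the paired destination port: rewrite the destination
 * MAC to 02:00:00:00:00:<dst_port>, set the source MAC to the TX port's
 * own address, and queue the mbuf in that port's TX buffer.
 */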
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	int sent;
	unsigned dst_port;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

	buffer = tx_buffer[dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}

/* main processing loop */
static void
l2fwd_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	int sent;
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct rte_eth_dev_tx_buffer *buffer;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_port == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		return;
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_port; i++) {

		portid = qconf->rx_port_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

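	/* Each forwarding lcore picks a random lifetime of 0-7 seconds, after
	 * which it stops forwarding so the keepalive monitor can detect the
	 * "failure" and relaunch it (see dead_core()).
	 */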
	uint64_t tsc_initial = rte_rdtsc();
	uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();

	while (!terminate_signal_received) {
		/* Keepalive heartbeat */
		rte_keepalive_mark_alive(rte_global_keepalive_info);

		cur_tsc = rte_rdtsc();

		/*
		 * Die randomly within 7 secs for demo purposes if
		 * keepalive enabled
		 */
		if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
			break;

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_rx_port; i++) {

				portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
				buffer = tx_buffer[portid];

				sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
				if (sent)
					port_statistics[portid].tx += sent;

			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_port; i++) {

			portid = qconf->rx_port_list[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						 pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_simple_forward(m, portid);
			}
		}
	}
}

static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	l2fwd_main_loop();
	return 0;
}

/* display usage */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ] [-K PERIOD] [-T PERIOD]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
	       "  -K PERIOD: keepalive check period in ms (5 default; 86400 max)\n"
	       "  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
	       prgname);
}

static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

static unsigned int
l2fwd_parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;
	if (n == 0)
		return 0;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return 0;

	return n;
}

static int
l2fwd_parse_timer_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

static int
l2fwd_parse_check_period(const char *q_arg)
{
	char *end = NULL;
	int n;

	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n >= MAX_TIMER_PERIOD)
		return -1;

	return n;
}

/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:K:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
			if (l2fwd_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
			if (l2fwd_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* timer period */
		case 'T':
			timer_period = l2fwd_parse_timer_period(optarg)
				* (int64_t)(1000 * TIMER_MILLISECOND);
			if (timer_period < 0) {
				printf("invalid timer period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* Check period */
		case 'K':
			check_period = l2fwd_parse_check_period(optarg);
			if (check_period < 0) {
				printf("invalid check period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			l2fwd_usage(prgname);
			return -1;

		default:
			l2fwd_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/* Check the link status of all ports in up to 9s, and print the final status */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
506 ("full-duplex") : ("half-duplex\n"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

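/* Keepalive callback invoked when an lcore is declared dead: if the lcore
 * has actually finished, wait for it and relaunch the forwarding loop;
 * otherwise treat the report as a false positive.
 */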
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
	if (terminate_signal_received)
		return;
	printf("Dead core %i - restarting..\n", id_core);
	if (rte_eal_get_lcore_state(id_core) == FINISHED) {
		rte_eal_wait_lcore(id_core);
		rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core);
	} else {
		printf("..false positive!\n");
	}
}

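/* Relay per-core keepalive state changes into the shared memory region
 * created in main(), so an external monitoring agent can observe them.
 */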
static void
relay_core_state(void *ptr_data, const int id_core,
	const enum rte_keepalive_state core_state, uint64_t last_alive)
{
	rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
		id_core, core_state, last_alive);
}

int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	int ret;
	uint8_t nb_ports;
	uint8_t nb_ports_available;
	uint8_t portid, last_port;
	unsigned lcore_id, rx_lcore_id;
	unsigned nb_ports_in_mask = 0;
	struct sigaction signal_handler;
	struct rte_keepalive_shm *ka_shm;

	memset(&signal_handler, 0, sizeof(signal_handler));
	terminate_signal_received = 0;
	signal_handler.sa_handler = &handle_sigterm;
	if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
			sigaction(SIGTERM, &signal_handler, NULL) == -1)
		rte_exit(EXIT_FAILURE, "SIGNAL\n");

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	l2fwd_enabled_port_mask = 0;

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");

	/* create the mbuf pool */
	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (l2fwd_pktmbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			l2fwd_dst_ports[portid] = last_port;
			l2fwd_dst_ports[last_port] = portid;
		} else
			last_port = portid;

		nb_ports_in_mask++;

		rte_eth_dev_info_get(portid, &dev_info);
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		l2fwd_dst_ports[last_port] = last_port;
	}

	rx_lcore_id = 1;
	qconf = NULL;

	/* Initialize the port/queue configuration of each logical core */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
		       l2fwd_rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->n_rx_port++;
		printf("Lcore %u: RX port %u\n",
			rx_lcore_id, (unsigned) portid);
	}

	nb_ports_available = nb_ports;

	/* Initialise each port */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %u\n",
				(unsigned) portid);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... ", (unsigned) portid);
		fflush(stdout);
		ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%u\n",
				ret, (unsigned) portid);

		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

		/* init one RX queue */
		fflush(stdout);
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     NULL,
					     l2fwd_pktmbuf_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup:err=%d, port=%u\n",
				ret, (unsigned) portid);

		/* init one TX queue on each port */
		fflush(stdout);
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
				NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_tx_queue_setup:err=%d, port=%u\n",
				ret, (unsigned) portid);

		/* Initialize TX buffers */
		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
				rte_eth_dev_socket_id(portid));
		if (tx_buffer[portid] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					(unsigned) portid);

		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
				rte_eth_tx_buffer_count_callback,
				&port_statistics[portid].dropped);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
					"tx buffer on port %u\n", (unsigned) portid);

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start:err=%d, port=%u\n",
				ret, (unsigned) portid);

		rte_eth_promiscuous_enable(portid);

		printf("Port %u, MAC address: "
			"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
			(unsigned) portid,
			l2fwd_ports_eth_addr[portid].addr_bytes[0],
			l2fwd_ports_eth_addr[portid].addr_bytes[1],
			l2fwd_ports_eth_addr[portid].addr_bytes[2],
			l2fwd_ports_eth_addr[portid].addr_bytes[3],
			l2fwd_ports_eth_addr[portid].addr_bytes[4],
			l2fwd_ports_eth_addr[portid].addr_bytes[5]);

		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);

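	/* Two periodic timers: hb_timer dispatches keepalive pings to the
	 * forwarding lcores, stats_timer triggers the statistics printout.
	 */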
	struct rte_timer hb_timer, stats_timer;

	rte_timer_subsystem_init();
	rte_timer_init(&stats_timer);

	ka_shm = NULL;
	if (check_period > 0) {
		ka_shm = rte_keepalive_shm_create();
		if (ka_shm == NULL)
			rte_exit(EXIT_FAILURE,
				"rte_keepalive_shm_create() failed");
		rte_global_keepalive_info =
			rte_keepalive_create(&dead_core, ka_shm);
		if (rte_global_keepalive_info == NULL)
			rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
		rte_keepalive_register_relay_callback(rte_global_keepalive_info,
			relay_core_state, ka_shm);
		rte_timer_init(&hb_timer);
		if (rte_timer_reset(&hb_timer,
				(check_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				(void(*)(struct rte_timer*, void*))
				&rte_keepalive_dispatch_pings,
				rte_global_keepalive_info
				) != 0 )
			rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
	}
	if (timer_period > 0) {
		if (rte_timer_reset(&stats_timer,
				(timer_period * rte_get_timer_hz()) / 1000,
				PERIODICAL,
				rte_lcore_id(),
				&print_stats, NULL
				) != 0 )
			rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
	}
	/* launch per-lcore init on every slave lcore */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];

		if (qconf->n_rx_port == 0)
			RTE_LOG(INFO, L2FWD,
				"lcore %u has nothing to do\n",
				lcore_id
				);
		else {
			rte_eal_remote_launch(
				l2fwd_launch_one_lcore,
				NULL,
				lcore_id
				);
			rte_keepalive_register_core(rte_global_keepalive_info,
				lcore_id);
		}
	}
	while (!terminate_signal_received) {
		rte_timer_manage();
		rte_delay_ms(5);
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	if (ka_shm != NULL)
		rte_keepalive_shm_cleanup(ka_shm);
	return 0;
}