]> git.proxmox.com Git - ceph.git/blob - ceph/src/dpdk/examples/multi_process/l2fwd_fork/main.c
bump version to 12.2.12-pve1
[ceph.git] / ceph / src / dpdk / examples / multi_process / l2fwd_fork / main.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 #define _GNU_SOURCE
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <string.h>
38 #include <stdint.h>
39 #include <sched.h>
40 #include <inttypes.h>
41 #include <sys/types.h>
42 #include <sys/queue.h>
43 #include <netinet/in.h>
44 #include <setjmp.h>
45 #include <stdarg.h>
46 #include <ctype.h>
47 #include <errno.h>
48 #include <getopt.h>
49
50 #include <rte_common.h>
51 #include <rte_log.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_launch.h>
58 #include <rte_atomic.h>
59 #include <rte_spinlock.h>
60 #include <rte_cycles.h>
61 #include <rte_prefetch.h>
62 #include <rte_lcore.h>
63 #include <rte_per_lcore.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_interrupts.h>
66 #include <rte_pci.h>
67 #include <rte_random.h>
68 #include <rte_debug.h>
69 #include <rte_ether.h>
70 #include <rte_ethdev.h>
71 #include <rte_ring.h>
72 #include <rte_mempool.h>
73 #include <rte_mbuf.h>
74 #include <rte_malloc.h>
75
76 #include "flib.h"
77
78 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
79 #define MBUF_NAME "mbuf_pool_%d"
80 #define MBUF_SIZE \
81 (RTE_MBUF_DEFAULT_DATAROOM + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
82 #define NB_MBUF 8192
83 #define RING_MASTER_NAME "l2fwd_ring_m2s_"
84 #define RING_SLAVE_NAME "l2fwd_ring_s2m_"
85 #define MAX_NAME_LEN 32
86 /* RECREATE flag indicates the slave's resources need to be re-initialized and its slave core launched again */
87 #define SLAVE_RECREATE_FLAG 0x1
88 /* RESTART flag indicates the ports need to be restarted and the START command sent again */
89 #define SLAVE_RESTART_FLAG 0x2
90 #define INVALID_MAPPING_ID ((unsigned)LCORE_ID_ANY)
91 /* Maximum message buffer per slave */
92 #define NB_CORE_MSGBUF 32
93 enum l2fwd_cmd{
94 CMD_START,
95 CMD_STOP,
96 };
97
98 #define MAX_PKT_BURST 32
99 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
100
101 /*
102 * Configurable number of RX/TX ring descriptors
103 */
104 #define RTE_TEST_RX_DESC_DEFAULT 128
105 #define RTE_TEST_TX_DESC_DEFAULT 512
106 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
107 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
108
109 /* ethernet addresses of ports */
110 static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
111
112 /* mask of enabled ports */
113 static uint32_t l2fwd_enabled_port_mask = 0;
114
115 /* list of enabled ports */
116 static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
117
118 static unsigned int l2fwd_rx_queue_per_lcore = 1;
119
120 struct mbuf_table {
121 unsigned len;
122 struct rte_mbuf *m_table[MAX_PKT_BURST];
123 };
124
125 #define MAX_RX_QUEUE_PER_LCORE 16
126 #define MAX_TX_QUEUE_PER_PORT 16
127 struct lcore_queue_conf {
128 unsigned n_rx_port;
129 unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
130 } __rte_cache_aligned;
131 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
132
133 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
134
135 struct lcore_resource_struct {
136 int enabled; /* Only set in case this lcore involved into packet forwarding */
137 int flags; /* Set only slave need to restart or recreate */
138 unsigned lcore_id; /* lcore ID */
139 unsigned pair_id; /* dependency lcore ID on port */
140 char ring_name[2][MAX_NAME_LEN];
141 /* ring[0] for master send cmd, slave read */
142 /* ring[1] for slave send ack, master read */
143 struct rte_ring *ring[2];
144 int port_num; /* Total port numbers */
145 uint8_t port[RTE_MAX_ETHPORTS]; /* Port id for that lcore to receive packets */
146 }__attribute__((packed)) __rte_cache_aligned;
147
148 static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
149 static struct rte_mempool *message_pool;
150 static rte_spinlock_t res_lock = RTE_SPINLOCK_INITIALIZER;
151 /* use floating processes */
152 static int float_proc = 0;
153 /* Save original cpu affinity */
154 struct cpu_aff_arg{
155 cpu_set_t set;
156 size_t size;
157 }cpu_aff;
158
159 static const struct rte_eth_conf port_conf = {
160 .rxmode = {
161 .split_hdr_size = 0,
162 .header_split = 0, /**< Header Split disabled */
163 .hw_ip_checksum = 0, /**< IP checksum offload disabled */
164 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
165 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
166 .hw_strip_crc = 0, /**< CRC stripped by hardware */
167 },
168 .txmode = {
169 .mq_mode = ETH_MQ_TX_NONE,
170 },
171 };
172
173 static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
174
175 /* Per-port statistics struct */
176 struct l2fwd_port_statistics {
177 uint64_t tx;
178 uint64_t rx;
179 uint64_t dropped;
180 } __rte_cache_aligned;
181 struct l2fwd_port_statistics *port_statistics;
182 /**
183 * pointer to lcore ID mapping array, used to return lcore id in case slave
184 * process exited unexpectedly, use only floating process option applied
185 **/
186 unsigned *mapping_id;
187
188 /* A tsc-based timer responsible for triggering statistics printout */
189 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
190 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
191 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
192
193 static int l2fwd_launch_one_lcore(void *dummy);
194
195 /* Print out statistics on packets dropped */
196 static void
197 print_stats(void)
198 {
199 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
200 unsigned portid;
201
202 total_packets_dropped = 0;
203 total_packets_tx = 0;
204 total_packets_rx = 0;
205
206 const char clr[] = { 27, '[', '2', 'J', '\0' };
207 const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
208
209 /* Clear screen and move to top left */
210 printf("%s%s", clr, topLeft);
211
212 printf("\nPort statistics ====================================");
213
214 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
215 /* skip disabled ports */
216 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
217 continue;
218 printf("\nStatistics for port %u ------------------------------"
219 "\nPackets sent: %24"PRIu64
220 "\nPackets received: %20"PRIu64
221 "\nPackets dropped: %21"PRIu64,
222 portid,
223 port_statistics[portid].tx,
224 port_statistics[portid].rx,
225 port_statistics[portid].dropped);
226
227 total_packets_dropped += port_statistics[portid].dropped;
228 total_packets_tx += port_statistics[portid].tx;
229 total_packets_rx += port_statistics[portid].rx;
230 }
231 printf("\nAggregate statistics ==============================="
232 "\nTotal packets sent: %18"PRIu64
233 "\nTotal packets received: %14"PRIu64
234 "\nTotal packets dropped: %15"PRIu64,
235 total_packets_tx,
236 total_packets_rx,
237 total_packets_dropped);
238 printf("\n====================================================\n");
239 }
240
241 static int
242 clear_cpu_affinity(void)
243 {
244 int s;
245
246 s = sched_setaffinity(0, cpu_aff.size, &cpu_aff.set);
247 if (s != 0) {
248 printf("sched_setaffinity failed:%s\n", strerror(errno));
249 return -1;
250 }
251
252 return 0;
253 }
254
255 static int
256 get_cpu_affinity(void)
257 {
258 int s;
259
260 cpu_aff.size = sizeof(cpu_set_t);
261 CPU_ZERO(&cpu_aff.set);
262
263 s = sched_getaffinity(0, cpu_aff.size, &cpu_aff.set);
264 if (s != 0) {
265 printf("sched_getaffinity failed:%s\n", strerror(errno));
266 return -1;
267 }
268
269 return 0;
270 }
271
/**
 * This function demonstrates the approach to create a ring in the first
 * instance, or re-attach an existing ring in a later instance.
 **/
static struct rte_ring *
create_ring(const char *name, unsigned count,
		int socket_id,unsigned flags)
{
	struct rte_ring *ring;

	if (name == NULL)
		return NULL;

	/* A previous instance may have created the ring already: attach it */
	ring = rte_ring_lookup(name);
	if (likely(ring != NULL))
		return ring;

	/* Not found, so this is the first caller: create the ring */
	return rte_ring_create(name, count, socket_id, flags);
}
292
293 /* Malloc with rte_malloc on structures that shared by master and slave */
294 static int
295 l2fwd_malloc_shared_struct(void)
296 {
297 port_statistics = rte_zmalloc("port_stat",
298 sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS,
299 0);
300 if (port_statistics == NULL)
301 return -1;
302
303 /* allocate mapping_id array */
304 if (float_proc) {
305 int i;
306 mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE,
307 0);
308
309 if (mapping_id == NULL)
310 return -1;
311
312 for (i = 0 ;i < RTE_MAX_LCORE; i++)
313 mapping_id[i] = INVALID_MAPPING_ID;
314 }
315 return 0;
316 }
317
318 /* Create ring which used for communicate among master and slave */
319 static int
320 create_ms_ring(unsigned slaveid)
321 {
322 unsigned flag = RING_F_SP_ENQ | RING_F_SC_DEQ;
323 struct lcore_resource_struct *res = &lcore_resource[slaveid];
324 unsigned socketid = rte_socket_id();
325
326 /* Always assume create ring on master socket_id */
327 /* Default only create a ring size 32 */
328 snprintf(res->ring_name[0], MAX_NAME_LEN, "%s%u",
329 RING_MASTER_NAME, slaveid);
330 if ((res->ring[0] = create_ring(res->ring_name[0], NB_CORE_MSGBUF,
331 socketid, flag)) == NULL) {
332 printf("Create m2s ring %s failed\n", res->ring_name[0]);
333 return -1;
334 }
335
336 snprintf(res->ring_name[1], MAX_NAME_LEN, "%s%u",
337 RING_SLAVE_NAME, slaveid);
338 if ((res->ring[1] = create_ring(res->ring_name[1], NB_CORE_MSGBUF,
339 socketid, flag)) == NULL) {
340 printf("Create s2m ring %s failed\n", res->ring_name[1]);
341 return -1;
342 }
343
344 return 0;
345 }
346
347 /* send command to pair in paired master and slave ring */
348 static inline int
349 sendcmd(unsigned slaveid, enum l2fwd_cmd cmd, int is_master)
350 {
351 struct lcore_resource_struct *res = &lcore_resource[slaveid];
352 void *msg;
353 int fd = !is_master;
354
355 /* Only check master, it must be enabled and running if it is slave */
356 if (is_master && !res->enabled)
357 return -1;
358
359 if (res->ring[fd] == NULL)
360 return -1;
361
362 if (rte_mempool_get(message_pool, &msg) < 0) {
363 printf("Error to get message buffer\n");
364 return -1;
365 }
366
367 *(enum l2fwd_cmd *)msg = cmd;
368
369 if (rte_ring_enqueue(res->ring[fd], msg) != 0) {
370 printf("Enqueue error\n");
371 rte_mempool_put(message_pool, msg);
372 return -1;
373 }
374
375 return 0;
376 }
377
378 /* Get command from pair in paired master and slave ring */
379 static inline int
380 getcmd(unsigned slaveid, enum l2fwd_cmd *cmd, int is_master)
381 {
382 struct lcore_resource_struct *res = &lcore_resource[slaveid];
383 void *msg;
384 int fd = !!is_master;
385 int ret;
386 /* Only check master, it must be enabled and running if it is slave */
387 if (is_master && (!res->enabled))
388 return -1;
389
390 if (res->ring[fd] == NULL)
391 return -1;
392
393 ret = rte_ring_dequeue(res->ring[fd], &msg);
394
395 if (ret == 0) {
396 *cmd = *(enum l2fwd_cmd *)msg;
397 rte_mempool_put(message_pool, msg);
398 }
399 return ret;
400 }
401
/* Master send command to slave and wait until ack received or error met */
static int
master_sendcmd_with_ack(unsigned slaveid, enum l2fwd_cmd cmd)
{
	enum l2fwd_cmd ack_cmd;
	int ret = -1;

	/* Failing to even enqueue the command is treated as fatal */
	if (sendcmd(slaveid, cmd, 1) != 0)
		rte_exit(EXIT_FAILURE, "Failed to send message\n");

	/* Get ack: busy-poll the s2m ring until the slave echoes the same
	 * command back.  NOTE(review): this spins with no delay; presumably
	 * acceptable because the slave answers promptly -- confirm. */
	while (1) {
		ret = getcmd(slaveid, &ack_cmd, 1);
		if (ret == 0 && cmd == ack_cmd)
			break;

		/* If slave not running yet, return an error */
		if (flib_query_slave_status(slaveid) != ST_RUN) {
			ret = -ENOENT;
			break;
		}
	}

	return ret;
}
427
/* restart all port that assigned to that slave lcore */
static int
reset_slave_all_ports(unsigned slaveid)
{
	struct lcore_resource_struct *slave = &lcore_resource[slaveid];
	int i, ret = 0;

	/* stop/start port */
	for (i = 0; i < slave->port_num; i++) {
		char buf_name[RTE_MEMPOOL_NAMESIZE];
		struct rte_mempool *pool;
		printf("Stop port :%d\n", slave->port[i]);
		rte_eth_dev_stop(slave->port[i]);
		/* Diagnostic only: report how many mbufs the port's pool holds
		 * after the stop, against the pool's nominal size NB_MBUF */
		snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, slave->port[i]);
		pool = rte_mempool_lookup(buf_name);
		if (pool)
			printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
				rte_mempool_avail_count(pool),
				(unsigned int)NB_MBUF);
		else
			printf("Can't find mempool %s\n", buf_name);

		printf("Start port :%d\n", slave->port[i]);
		ret = rte_eth_dev_start(slave->port[i]);
		/* Stop at the first port that fails to restart; caller sees ret */
		if (ret != 0)
			break;
	}
	return ret;
}
457
/* Reset the resources shared with a slave; currently only its ports. */
static int
reset_shared_structures(unsigned slaveid)
{
	/* Only port are shared resource here */
	return reset_slave_all_ports(slaveid);
}
467
468 /**
469 * Call this function to re-create resource that needed for slave process that
470 * exited in last instance
471 **/
472 static int
473 init_slave_res(unsigned slaveid)
474 {
475 struct lcore_resource_struct *slave = &lcore_resource[slaveid];
476 enum l2fwd_cmd cmd;
477
478 if (!slave->enabled) {
479 printf("Something wrong with lcore=%u enabled=%d\n",slaveid,
480 slave->enabled);
481 return -1;
482 }
483
484 /* Initialize ring */
485 if (create_ms_ring(slaveid) != 0)
486 rte_exit(EXIT_FAILURE, "failed to create ring for slave %u\n",
487 slaveid);
488
489 /* drain un-read buffer if have */
490 while (getcmd(slaveid, &cmd, 1) == 0);
491 while (getcmd(slaveid, &cmd, 0) == 0);
492
493 return 0;
494 }
495
496 static int
497 recreate_one_slave(unsigned slaveid)
498 {
499 int ret = 0;
500 /* Re-initialize resource for stalled slave */
501 if ((ret = init_slave_res(slaveid)) != 0) {
502 printf("Init slave=%u failed\n", slaveid);
503 return ret;
504 }
505
506 if ((ret = flib_remote_launch(l2fwd_launch_one_lcore, NULL, slaveid))
507 != 0)
508 printf("Launch slave %u failed\n", slaveid);
509
510 return ret;
511 }
512
513 /**
514 * remapping resource belong to slave_id to new lcore that gets from flib_assign_lcore_id(),
515 * used only floating process option applied.
516 *
517 * @param slaveid
518 * original lcore_id that apply for remapping
519 */
520 static void
521 remapping_slave_resource(unsigned slaveid, unsigned map_id)
522 {
523
524 /* remapping lcore_resource */
525 memcpy(&lcore_resource[map_id], &lcore_resource[slaveid],
526 sizeof(struct lcore_resource_struct));
527
528 /* remapping lcore_queue_conf */
529 memcpy(&lcore_queue_conf[map_id], &lcore_queue_conf[slaveid],
530 sizeof(struct lcore_queue_conf));
531 }
532
/*
 * Reset both lcores of a forwarding pair: restart their ports, release any
 * floating-process lcore id mappings, then re-create and re-launch both
 * slaves.  Returns 0 on success, the first failing step's code otherwise.
 */
static int
reset_pair(unsigned slaveid, unsigned pairid)
{
	int ret;

	/* Restart the ports owned by each member of the pair first */
	if ((ret = reset_shared_structures(slaveid)) != 0)
		goto back;

	if((ret = reset_shared_structures(pairid)) != 0)
		goto back;

	if (float_proc) {
		/* Return the floating lcore ids so flib can hand them out again */
		unsigned map_id = mapping_id[slaveid];

		if (map_id != INVALID_MAPPING_ID) {
			printf("%u return mapping id %u\n", slaveid, map_id);
			flib_free_lcore_id(map_id);
			mapping_id[slaveid] = INVALID_MAPPING_ID;
		}

		map_id = mapping_id[pairid];
		if (map_id != INVALID_MAPPING_ID) {
			printf("%u return mapping id %u\n", pairid, map_id);
			flib_free_lcore_id(map_id);
			mapping_id[pairid] = INVALID_MAPPING_ID;
		}
	}

	/* Finally re-create rings and re-launch the forwarding loop on both */
	if((ret = recreate_one_slave(slaveid)) != 0)
		goto back;

	ret = recreate_one_slave(pairid);

back:
	return ret;
}
568
/*
 * Callback invoked when a slave process exits: flag the slave so the master
 * will later re-create its resources and relaunch it.
 */
static void
slave_exit_cb(unsigned slaveid, __attribute__((unused))int stat)
{
	struct lcore_resource_struct *slave = &lcore_resource[slaveid];

	printf("Get slave %u leave info\n", slaveid);
	if (!slave->enabled) {
		printf("Lcore=%u not registered for it's exit\n", slaveid);
		return;
	}
	/* NOTE(review): enabled is read before taking res_lock; presumably
	 * safe because enabled is only written during setup -- confirm. */
	rte_spinlock_lock(&res_lock);

	/* Change the state and wait master to start them */
	slave->flags = SLAVE_RECREATE_FLAG;

	rte_spinlock_unlock(&res_lock);
}
586
/*
 * Forward one packet out of the destination port paired with the RX port:
 * rewrite the Ethernet source/destination addresses, then hand the mbuf to
 * the destination port's TX buffer.
 */
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
	struct ether_hdr *eth;
	void *tmp;
	unsigned dst_port;
	int sent;
	struct rte_eth_dev_tx_buffer *buffer;

	dst_port = l2fwd_dst_ports[portid];
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	/* Destination MAC becomes 02:00:00:00:00:xx, xx = dst port id.
	 * The 64-bit store writes the 6 MAC bytes plus 2 bytes beyond them
	 * (the start of the source MAC, overwritten just below anyway). */
	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr: MAC of the port the packet is sent out of */
	ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

	/* rte_eth_tx_buffer() returns the number of packets transmitted when
	 * it flushes the buffer, 0 when the packet was merely queued */
	buffer = tx_buffer[dst_port];
	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
	if (sent)
		port_statistics[dst_port].tx += sent;
}
611
612 /* main processing loop */
613 static void
614 l2fwd_main_loop(void)
615 {
616 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
617 struct rte_mbuf *m;
618 int sent;
619 unsigned lcore_id;
620 uint64_t prev_tsc, diff_tsc, cur_tsc;
621 unsigned i, j, portid, nb_rx;
622 struct lcore_queue_conf *qconf;
623 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
624 BURST_TX_DRAIN_US;
625 struct rte_eth_dev_tx_buffer *buffer;
626
627 prev_tsc = 0;
628
629 lcore_id = rte_lcore_id();
630
631 qconf = &lcore_queue_conf[lcore_id];
632
633 if (qconf->n_rx_port == 0) {
634 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
635 return;
636 }
637
638 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
639
640 for (i = 0; i < qconf->n_rx_port; i++) {
641 portid = qconf->rx_port_list[i];
642 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
643 portid);
644 }
645
646 while (1) {
647 enum l2fwd_cmd cmd;
648 cur_tsc = rte_rdtsc();
649
650 if (unlikely(getcmd(lcore_id, &cmd, 0) == 0)) {
651 sendcmd(lcore_id, cmd, 0);
652
653 /* If get stop command, stop forwarding and exit */
654 if (cmd == CMD_STOP) {
655 return;
656 }
657 }
658
659 /*
660 * TX burst queue drain
661 */
662 diff_tsc = cur_tsc - prev_tsc;
663 if (unlikely(diff_tsc > drain_tsc)) {
664
665 for (i = 0; i < qconf->n_rx_port; i++) {
666
667 portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
668 buffer = tx_buffer[portid];
669
670 sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
671 if (sent)
672 port_statistics[portid].tx += sent;
673
674 }
675 }
676
677 /*
678 * Read packet from RX queues
679 */
680 for (i = 0; i < qconf->n_rx_port; i++) {
681
682 portid = qconf->rx_port_list[i];
683 nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
684 pkts_burst, MAX_PKT_BURST);
685
686 port_statistics[portid].rx += nb_rx;
687
688 for (j = 0; j < nb_rx; j++) {
689 m = pkts_burst[j];
690 rte_prefetch0(rte_pktmbuf_mtod(m, void *));
691 l2fwd_simple_forward(m, portid);
692 }
693 }
694 }
695 }
696
/*
 * Entry point for each slave process launched by flib.  With the floating
 * process option the slave first detaches from its original core and takes
 * a fresh lcore id, then runs the forwarding loop until CMD_STOP.
 */
static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	unsigned lcore_id = rte_lcore_id();

	if (float_proc) {
		unsigned flcore_id;

		/* Change it to floating process, also change it's lcore_id */
		clear_cpu_affinity();
		RTE_PER_LCORE(_lcore_id) = 0;
		/* Get a lcore_id */
		if (flib_assign_lcore_id() < 0 ) {
			printf("flib_assign_lcore_id failed\n");
			return -1;
		}
		flcore_id = rte_lcore_id();
		/* Set mapping id, so master can return it after slave exited */
		mapping_id[lcore_id] = flcore_id;
		printf("Org lcore_id = %u, cur lcore_id = %u\n",
				lcore_id, flcore_id);
		/* copy this lcore's queue/port config over to the new id */
		remapping_slave_resource(lcore_id, flcore_id);
	}

	l2fwd_main_loop();

	/* return lcore_id before return */
	if (float_proc) {
		flib_free_lcore_id(rte_lcore_id());
		mapping_id[lcore_id] = INVALID_MAPPING_ID;
	}
	return 0;
}
730
/* display usage */
static void
l2fwd_usage(const char *prgname)
{
	/* The synopsis previously advertised a "-s COREMASK" option, but no
	 * such option is accepted by l2fwd_parse_args() (getopt string is
	 * "p:q:T:f"), so it is not listed here. */
	printf("%s [EAL options] -- -p PORTMASK [-q NQ] [-T PERIOD] [-f]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queue (=ports) per lcore (default is 1)\n"
	       "  -f use floating process which won't bind to any core to run\n"
	       "  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
	       prgname);
}
742
/* Parse the hexadecimal port mask; returns -1 on empty/invalid/zero input. */
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);

	/* reject empty input, trailing garbage, or an all-zero mask */
	if (portmask[0] == '\0' || end == NULL || *end != '\0' || pm == 0)
		return -1;

	return pm;
}
759
760 static unsigned int
761 l2fwd_parse_nqueue(const char *q_arg)
762 {
763 char *end = NULL;
764 unsigned long n;
765
766 /* parse hexadecimal string */
767 n = strtoul(q_arg, &end, 10);
768 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
769 return 0;
770 if (n == 0)
771 return 0;
772 if (n >= MAX_RX_QUEUE_PER_LCORE)
773 return 0;
774
775 return n;
776 }
777
778 static int
779 l2fwd_parse_timer_period(const char *q_arg)
780 {
781 char *end = NULL;
782 int n;
783
784 /* parse number string */
785 n = strtol(q_arg, &end, 10);
786 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
787 return -1;
788 if (n >= MAX_TIMER_PERIOD)
789 return -1;
790
791 return n;
792 }
793
/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};
	int has_pmask = 0;

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:T:f",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask (mandatory) */
		case 'p':
			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
			if (l2fwd_enabled_port_mask == 0) {
				printf("invalid portmask\n");
				l2fwd_usage(prgname);
				return -1;
			}
			has_pmask = 1;
			break;

		/* nqueue */
		case 'q':
			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
			if (l2fwd_rx_queue_per_lcore == 0) {
				printf("invalid queue number\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* timer period: seconds converted to TSC-derived units;
		 * a parse error (-1) keeps the product negative and is
		 * caught by the check below */
		case 'T':
			timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
			if (timer_period < 0) {
				printf("invalid timer period\n");
				l2fwd_usage(prgname);
				return -1;
			}
			break;

		/* use floating process */
		case 'f':
			float_proc = 1;
			break;

		/* long options (none defined; any hit is an error) */
		case 0:
			l2fwd_usage(prgname);
			return -1;

		default:
			l2fwd_usage(prgname);
			return -1;
		}
	}

	/* make argv[ret] the program name so the caller can reuse argv */
	if (optind >= 0)
		argv[optind-1] = prgname;

	/* -p is mandatory */
	if (!has_pmask) {
		l2fwd_usage(prgname);
		return -1;
	}
	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
871
872 /* Check the link status of all ports in up to 9s, and print them finally */
873 static void
874 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
875 {
876 #define CHECK_INTERVAL 100 /* 100ms */
877 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
878 uint8_t portid, count, all_ports_up, print_flag = 0;
879 struct rte_eth_link link;
880
881 printf("\nChecking link status");
882 fflush(stdout);
883 for (count = 0; count <= MAX_CHECK_TIME; count++) {
884 all_ports_up = 1;
885 for (portid = 0; portid < port_num; portid++) {
886 if ((port_mask & (1 << portid)) == 0)
887 continue;
888 memset(&link, 0, sizeof(link));
889 rte_eth_link_get_nowait(portid, &link);
890 /* print link status if flag set */
891 if (print_flag == 1) {
892 if (link.link_status)
893 printf("Port %d Link Up - speed %u "
894 "Mbps - %s\n", (uint8_t)portid,
895 (unsigned)link.link_speed,
896 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
897 ("full-duplex") : ("half-duplex\n"));
898 else
899 printf("Port %d Link Down\n",
900 (uint8_t)portid);
901 continue;
902 }
903 /* clear all_ports_up flag if any link down */
904 if (link.link_status == ETH_LINK_DOWN) {
905 all_ports_up = 0;
906 break;
907 }
908 }
909 /* after finally printing all link status, get out */
910 if (print_flag == 1)
911 break;
912
913 if (all_ports_up == 0) {
914 printf(".");
915 fflush(stdout);
916 rte_delay_ms(CHECK_INTERVAL);
917 }
918
919 /* set the print_flag if all ports up or timeout */
920 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
921 print_flag = 1;
922 printf("done\n");
923 }
924 }
925 }
926
927 int
928 main(int argc, char **argv)
929 {
930 struct lcore_queue_conf *qconf;
931 struct rte_eth_dev_info dev_info;
932 int ret;
933 uint8_t nb_ports;
934 uint8_t nb_ports_available;
935 uint8_t portid, last_port;
936 unsigned rx_lcore_id;
937 unsigned nb_ports_in_mask = 0;
938 unsigned i;
939 int flags = 0;
940 uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
941
942 /* Save cpu_affinity first, restore it in case it's floating process option */
943 if (get_cpu_affinity() != 0)
944 rte_exit(EXIT_FAILURE, "get_cpu_affinity error\n");
945
946 /* Also tries to set cpu affinity to detect whether it will fail in child process */
947 if(clear_cpu_affinity() != 0)
948 rte_exit(EXIT_FAILURE, "clear_cpu_affinity error\n");
949
950 /* init EAL */
951 ret = rte_eal_init(argc, argv);
952 if (ret < 0)
953 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
954 argc -= ret;
955 argv += ret;
956
957 /* parse application arguments (after the EAL ones) */
958 ret = l2fwd_parse_args(argc, argv);
959 if (ret < 0)
960 rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
961
962 /*flib init */
963 if (flib_init() != 0)
964 rte_exit(EXIT_FAILURE, "flib init error");
965
966 /**
967 * Allocated structures that slave lcore would change. For those that slaves are
968 * read only, needn't use malloc to share and global or static variables is ok since
969 * slave inherit all the knowledge that master initialized.
970 **/
971 if (l2fwd_malloc_shared_struct() != 0)
972 rte_exit(EXIT_FAILURE, "malloc mem failed\n");
973
974 /* Initialize lcore_resource structures */
975 memset(lcore_resource, 0, sizeof(lcore_resource));
976 for (i = 0; i < RTE_MAX_LCORE; i++)
977 lcore_resource[i].lcore_id = i;
978
979 nb_ports = rte_eth_dev_count();
980 if (nb_ports == 0)
981 rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
982
983 /* create the mbuf pool */
984 for (portid = 0; portid < nb_ports; portid++) {
985 /* skip ports that are not enabled */
986 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
987 continue;
988 char buf_name[RTE_MEMPOOL_NAMESIZE];
989 flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
990 snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
991 l2fwd_pktmbuf_pool[portid] =
992 rte_mempool_create(buf_name, NB_MBUF,
993 MBUF_SIZE, 32,
994 sizeof(struct rte_pktmbuf_pool_private),
995 rte_pktmbuf_pool_init, NULL,
996 rte_pktmbuf_init, NULL,
997 rte_socket_id(), flags);
998 if (l2fwd_pktmbuf_pool[portid] == NULL)
999 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
1000
1001 printf("Create mbuf %s\n", buf_name);
1002 }
1003
1004 /* reset l2fwd_dst_ports */
1005 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
1006 l2fwd_dst_ports[portid] = 0;
1007 last_port = 0;
1008
1009 /*
1010 * Each logical core is assigned a dedicated TX queue on each port.
1011 */
1012 for (portid = 0; portid < nb_ports; portid++) {
1013 /* skip ports that are not enabled */
1014 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1015 continue;
1016
1017 if (nb_ports_in_mask % 2) {
1018 l2fwd_dst_ports[portid] = last_port;
1019 l2fwd_dst_ports[last_port] = portid;
1020 }
1021 else
1022 last_port = portid;
1023
1024 nb_ports_in_mask++;
1025
1026 rte_eth_dev_info_get(portid, &dev_info);
1027 }
1028 if (nb_ports_in_mask % 2) {
1029 printf("Notice: odd number of ports in portmask.\n");
1030 l2fwd_dst_ports[last_port] = last_port;
1031 }
1032
1033 rx_lcore_id = 0;
1034 qconf = NULL;
1035
1036 /* Initialize the port/queue configuration of each logical core */
1037 for (portid = 0; portid < nb_ports; portid++) {
1038 struct lcore_resource_struct *res;
1039 /* skip ports that are not enabled */
1040 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1041 continue;
1042
1043 /* get the lcore_id for this port */
1044 /* skip master lcore */
1045 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
1046 rte_get_master_lcore() == rx_lcore_id ||
1047 lcore_queue_conf[rx_lcore_id].n_rx_port ==
1048 l2fwd_rx_queue_per_lcore) {
1049
1050 rx_lcore_id++;
1051 if (rx_lcore_id >= RTE_MAX_LCORE)
1052 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1053 }
1054
1055 if (qconf != &lcore_queue_conf[rx_lcore_id])
1056 /* Assigned a new logical core in the loop above. */
1057 qconf = &lcore_queue_conf[rx_lcore_id];
1058
1059 qconf->rx_port_list[qconf->n_rx_port] = portid;
1060 qconf->n_rx_port++;
1061
1062 /* Save the port resource info into lcore_resource strucutres */
1063 res = &lcore_resource[rx_lcore_id];
1064 res->enabled = 1;
1065 res->port[res->port_num++] = portid;
1066
1067 printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
1068 }
1069
1070 nb_ports_available = nb_ports;
1071
1072 /* Initialise each port */
1073 for (portid = 0; portid < nb_ports; portid++) {
1074 /* skip ports that are not enabled */
1075 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
1076 printf("Skipping disabled port %u\n", (unsigned) portid);
1077 nb_ports_available--;
1078 continue;
1079 }
1080 /* init port */
1081 printf("Initializing port %u... ", (unsigned) portid);
1082 fflush(stdout);
1083 ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
1084 if (ret < 0)
1085 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
1086 ret, (unsigned) portid);
1087
1088 rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
1089
1090 /* init one RX queue */
1091 fflush(stdout);
1092 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
1093 rte_eth_dev_socket_id(portid),
1094 NULL,
1095 l2fwd_pktmbuf_pool[portid]);
1096 if (ret < 0)
1097 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
1098 ret, (unsigned) portid);
1099
1100 /* init one TX queue on each port */
1101 fflush(stdout);
1102 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
1103 rte_eth_dev_socket_id(portid),
1104 NULL);
1105 if (ret < 0)
1106 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
1107 ret, (unsigned) portid);
1108
1109 /* Initialize TX buffers */
1110 tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
1111 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
1112 rte_eth_dev_socket_id(portid));
1113 if (tx_buffer[portid] == NULL)
1114 rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
1115 (unsigned) portid);
1116
1117 rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
1118
1119 ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
1120 rte_eth_tx_buffer_count_callback,
1121 &port_statistics[portid].dropped);
1122 if (ret < 0)
1123 rte_exit(EXIT_FAILURE, "Cannot set error callback for "
1124 "tx buffer on port %u\n", (unsigned) portid);
1125
1126 /* Start device */
1127 ret = rte_eth_dev_start(portid);
1128 if (ret < 0)
1129 rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
1130 ret, (unsigned) portid);
1131
1132 printf("done: \n");
1133
1134 rte_eth_promiscuous_enable(portid);
1135
1136 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
1137 (unsigned) portid,
1138 l2fwd_ports_eth_addr[portid].addr_bytes[0],
1139 l2fwd_ports_eth_addr[portid].addr_bytes[1],
1140 l2fwd_ports_eth_addr[portid].addr_bytes[2],
1141 l2fwd_ports_eth_addr[portid].addr_bytes[3],
1142 l2fwd_ports_eth_addr[portid].addr_bytes[4],
1143 l2fwd_ports_eth_addr[portid].addr_bytes[5]);
1144
1145 /* initialize port stats */
1146 //memset(&port_statistics, 0, sizeof(port_statistics));
1147 }
1148
1149 if (!nb_ports_available) {
1150 rte_exit(EXIT_FAILURE,
1151 "All available ports are disabled. Please set portmask.\n");
1152 }
1153
1154 check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
1155
1156 /* Record pair lcore */
1157 /**
1158 * Since the l2fwd example pairs neighbouring ports — port 0 receives and
1159 * forwards to port 1, and vice versa — these two ports depend on each other.
1160 * If one port stops working (e.g. its slave is killed), the port must be
1161 * stopped and started again; during that time the paired port has to wait
1162 * until the stop/start procedure completes. So record the pair relationship
1163 * for the lcores working on those ports.
1164 **/
1165 for (portid = 0; portid < nb_ports; portid++) {
1166 uint32_t pair_port;
1167 unsigned lcore = 0, pair_lcore = 0;
1168 unsigned j, find_lcore, find_pair_lcore;
1169 /* skip ports that are not enabled */
1170 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
1171 continue;
1172
1173 /* Find pair ports' lcores */
1174 find_lcore = find_pair_lcore = 0;
1175 pair_port = l2fwd_dst_ports[portid];
1176 for (i = 0; i < RTE_MAX_LCORE; i++) {
1177 if (!rte_lcore_is_enabled(i))
1178 continue;
1179 for (j = 0; j < lcore_queue_conf[i].n_rx_port;j++) {
1180 if (lcore_queue_conf[i].rx_port_list[j] == portid) {
1181 lcore = i;
1182 find_lcore = 1;
1183 break;
1184 }
1185 if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
1186 pair_lcore = i;
1187 find_pair_lcore = 1;
1188 break;
1189 }
1190 }
1191 if (find_lcore && find_pair_lcore)
1192 break;
1193 }
1194 if (!find_lcore || !find_pair_lcore)
1195 rte_exit(EXIT_FAILURE, "Not find port=%d pair\n", portid);
1196
1197 printf("lcore %u and %u paired\n", lcore, pair_lcore);
1198 lcore_resource[lcore].pair_id = pair_lcore;
1199 lcore_resource[pair_lcore].pair_id = lcore;
1200 }
1201
1202 /* Create message buffer for all master and slave */
1203 message_pool = rte_mempool_create("ms_msg_pool",
1204 NB_CORE_MSGBUF * RTE_MAX_LCORE,
1205 sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
1206 0,
1207 rte_pktmbuf_pool_init, NULL,
1208 rte_pktmbuf_init, NULL,
1209 rte_socket_id(), 0);
1210
1211 if (message_pool == NULL)
1212 rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
1213
1214 /* Create ring for each master and slave pair, also register cb when slave leaves */
1215 for (i = 0; i < RTE_MAX_LCORE; i++) {
1216 /**
1217 * Only create ring and register slave_exit cb in case that core involved into
1218 * packet forwarding
1219 **/
1220 if (lcore_resource[i].enabled) {
1221 /* Create ring for master and slave communication */
1222 ret = create_ms_ring(i);
1223 if (ret != 0)
1224 rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed",
1225 i);
1226
1227 if (flib_register_slave_exit_notify(i,
1228 slave_exit_cb) != 0)
1229 rte_exit(EXIT_FAILURE,
1230 "Register master_trace_slave_exit failed");
1231 }
1232 }
1233
1234 /* launch per-lcore init on every lcore except master */
1235 flib_mp_remote_launch(l2fwd_launch_one_lcore, NULL, SKIP_MASTER);
1236
1237 /* print statistics every 10 seconds */
1238 prev_tsc = cur_tsc = rte_rdtsc();
1239 timer_tsc = 0;
1240 while (1) {
1241 sleep(1);
1242 cur_tsc = rte_rdtsc();
1243 diff_tsc = cur_tsc - prev_tsc;
1244 /* if timer is enabled */
1245 if (timer_period > 0) {
1246
1247 /* advance the timer */
1248 timer_tsc += diff_tsc;
1249
1250 /* if timer has reached its timeout */
1251 if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
1252
1253 print_stats();
1254 /* reset the timer */
1255 timer_tsc = 0;
1256 }
1257 }
1258
1259 prev_tsc = cur_tsc;
1260
1261 /* Check any slave need restart or recreate */
1262 rte_spinlock_lock(&res_lock);
1263 for (i = 0; i < RTE_MAX_LCORE; i++) {
1264 struct lcore_resource_struct *res = &lcore_resource[i];
1265 struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
1266
1267 /* If find slave exited, try to reset pair */
1268 if (res->enabled && res->flags && pair->enabled) {
1269 if (!pair->flags) {
1270 master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
1271 rte_spinlock_unlock(&res_lock);
1272 sleep(1);
1273 rte_spinlock_lock(&res_lock);
1274 if (pair->flags)
1275 continue;
1276 }
1277 if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
1278 rte_exit(EXIT_FAILURE, "failed to reset slave");
1279 res->flags = 0;
1280 pair->flags = 0;
1281 }
1282 }
1283 rte_spinlock_unlock(&res_lock);
1284 }
1285
1286 }