4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
42 #include <sys/queue.h>
43 #include <netinet/in.h>
50 #include <rte_common.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
56 #include <rte_per_lcore.h>
57 #include <rte_launch.h>
58 #include <rte_atomic.h>
59 #include <rte_spinlock.h>
60 #include <rte_cycles.h>
61 #include <rte_prefetch.h>
62 #include <rte_lcore.h>
63 #include <rte_per_lcore.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_interrupts.h>
67 #include <rte_random.h>
68 #include <rte_debug.h>
69 #include <rte_ether.h>
70 #include <rte_ethdev.h>
72 #include <rte_mempool.h>
74 #include <rte_malloc.h>
78 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
79 #define MBUF_NAME "mbuf_pool_%d"
81 (RTE_MBUF_DEFAULT_DATAROOM + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
83 #define RING_MASTER_NAME "l2fwd_ring_m2s_"
84 #define RING_SLAVE_NAME "l2fwd_ring_s2m_"
85 #define MAX_NAME_LEN 32
86 /* RECREATE flag indicates the need to re-initialize resources and launch the slave core again */
87 #define SLAVE_RECREATE_FLAG 0x1
88 /* RESTART flag indicates the need to restart the port and send the START command again */
89 #define SLAVE_RESTART_FLAG 0x2
90 #define INVALID_MAPPING_ID ((unsigned)LCORE_ID_ANY)
91 /* Maximum message buffer per slave */
92 #define NB_CORE_MSGBUF 32
98 #define MAX_PKT_BURST 32
99 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
102 * Configurable number of RX/TX ring descriptors
104 #define RTE_TEST_RX_DESC_DEFAULT 128
105 #define RTE_TEST_TX_DESC_DEFAULT 512
106 static uint16_t nb_rxd
= RTE_TEST_RX_DESC_DEFAULT
;
107 static uint16_t nb_txd
= RTE_TEST_TX_DESC_DEFAULT
;
109 /* ethernet addresses of ports */
110 static struct ether_addr l2fwd_ports_eth_addr
[RTE_MAX_ETHPORTS
];
112 /* mask of enabled ports */
113 static uint32_t l2fwd_enabled_port_mask
= 0;
115 /* list of enabled ports */
116 static uint32_t l2fwd_dst_ports
[RTE_MAX_ETHPORTS
];
118 static unsigned int l2fwd_rx_queue_per_lcore
= 1;
122 struct rte_mbuf
*m_table
[MAX_PKT_BURST
];
125 #define MAX_RX_QUEUE_PER_LCORE 16
126 #define MAX_TX_QUEUE_PER_PORT 16
127 struct lcore_queue_conf
{
129 unsigned rx_port_list
[MAX_RX_QUEUE_PER_LCORE
];
130 } __rte_cache_aligned
;
131 struct lcore_queue_conf lcore_queue_conf
[RTE_MAX_LCORE
];
133 struct rte_eth_dev_tx_buffer
*tx_buffer
[RTE_MAX_ETHPORTS
];
135 struct lcore_resource_struct
{
136 int enabled
; /* Only set in case this lcore involved into packet forwarding */
137 int flags
; /* Set only slave need to restart or recreate */
138 unsigned lcore_id
; /* lcore ID */
139 unsigned pair_id
; /* dependency lcore ID on port */
140 char ring_name
[2][MAX_NAME_LEN
];
141 /* ring[0] for master send cmd, slave read */
142 /* ring[1] for slave send ack, master read */
143 struct rte_ring
*ring
[2];
144 int port_num
; /* Total port numbers */
145 uint8_t port
[RTE_MAX_ETHPORTS
]; /* Port id for that lcore to receive packets */
146 }__attribute__((packed
)) __rte_cache_aligned
;
148 static struct lcore_resource_struct lcore_resource
[RTE_MAX_LCORE
];
149 static struct rte_mempool
*message_pool
;
150 static rte_spinlock_t res_lock
= RTE_SPINLOCK_INITIALIZER
;
151 /* use floating processes */
152 static int float_proc
= 0;
153 /* Save original cpu affinity */
159 static const struct rte_eth_conf port_conf
= {
162 .header_split
= 0, /**< Header Split disabled */
163 .hw_ip_checksum
= 0, /**< IP checksum offload disabled */
164 .hw_vlan_filter
= 0, /**< VLAN filtering disabled */
165 .jumbo_frame
= 0, /**< Jumbo Frame Support disabled */
166 .hw_strip_crc
= 0, /**< CRC stripped by hardware */
169 .mq_mode
= ETH_MQ_TX_NONE
,
173 static struct rte_mempool
* l2fwd_pktmbuf_pool
[RTE_MAX_ETHPORTS
];
175 /* Per-port statistics struct */
176 struct l2fwd_port_statistics
{
180 } __rte_cache_aligned
;
181 struct l2fwd_port_statistics
*port_statistics
;
183 * pointer to lcore ID mapping array, used to return lcore id in case slave
184 * process exited unexpectedly; used only when the floating-process option is applied
186 unsigned *mapping_id
;
188 /* A tsc-based timer responsible for triggering statistics printout */
189 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
190 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
191 static int64_t timer_period
= 10 * TIMER_MILLISECOND
* 1000; /* default period is 10 seconds */
193 static int l2fwd_launch_one_lcore(void *dummy
);
195 /* Print out statistics on packets dropped */
199 uint64_t total_packets_dropped
, total_packets_tx
, total_packets_rx
;
202 total_packets_dropped
= 0;
203 total_packets_tx
= 0;
204 total_packets_rx
= 0;
206 const char clr
[] = { 27, '[', '2', 'J', '\0' };
207 const char topLeft
[] = { 27, '[', '1', ';', '1', 'H','\0' };
209 /* Clear screen and move to top left */
210 printf("%s%s", clr
, topLeft
);
212 printf("\nPort statistics ====================================");
214 for (portid
= 0; portid
< RTE_MAX_ETHPORTS
; portid
++) {
215 /* skip disabled ports */
216 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0)
218 printf("\nStatistics for port %u ------------------------------"
219 "\nPackets sent: %24"PRIu64
220 "\nPackets received: %20"PRIu64
221 "\nPackets dropped: %21"PRIu64
,
223 port_statistics
[portid
].tx
,
224 port_statistics
[portid
].rx
,
225 port_statistics
[portid
].dropped
);
227 total_packets_dropped
+= port_statistics
[portid
].dropped
;
228 total_packets_tx
+= port_statistics
[portid
].tx
;
229 total_packets_rx
+= port_statistics
[portid
].rx
;
231 printf("\nAggregate statistics ==============================="
232 "\nTotal packets sent: %18"PRIu64
233 "\nTotal packets received: %14"PRIu64
234 "\nTotal packets dropped: %15"PRIu64
,
237 total_packets_dropped
);
238 printf("\n====================================================\n");
242 clear_cpu_affinity(void)
246 s
= sched_setaffinity(0, cpu_aff
.size
, &cpu_aff
.set
);
248 printf("sched_setaffinity failed:%s\n", strerror(errno
));
256 get_cpu_affinity(void)
260 cpu_aff
.size
= sizeof(cpu_set_t
);
261 CPU_ZERO(&cpu_aff
.set
);
263 s
= sched_getaffinity(0, cpu_aff
.size
, &cpu_aff
.set
);
265 printf("sched_getaffinity failed:%s\n", strerror(errno
));
273 * This function demonstrates the approach to create a ring in the first instance
274 * or re-attach an existing ring in a later instance.
276 static struct rte_ring
*
277 create_ring(const char *name
, unsigned count
,
278 int socket_id
,unsigned flags
)
280 struct rte_ring
*ring
;
285 /* If already create, just attached it */
286 if (likely((ring
= rte_ring_lookup(name
)) != NULL
))
289 /* First call it, create one */
290 return rte_ring_create(name
, count
, socket_id
, flags
);
293 /* Malloc with rte_malloc on structures that shared by master and slave */
295 l2fwd_malloc_shared_struct(void)
297 port_statistics
= rte_zmalloc("port_stat",
298 sizeof(struct l2fwd_port_statistics
) * RTE_MAX_ETHPORTS
,
300 if (port_statistics
== NULL
)
303 /* allocate mapping_id array */
306 mapping_id
= rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE
,
309 if (mapping_id
== NULL
)
312 for (i
= 0 ;i
< RTE_MAX_LCORE
; i
++)
313 mapping_id
[i
] = INVALID_MAPPING_ID
;
318 /* Create ring which used for communicate among master and slave */
320 create_ms_ring(unsigned slaveid
)
322 unsigned flag
= RING_F_SP_ENQ
| RING_F_SC_DEQ
;
323 struct lcore_resource_struct
*res
= &lcore_resource
[slaveid
];
324 unsigned socketid
= rte_socket_id();
326 /* Always assume create ring on master socket_id */
327 /* Default only create a ring size 32 */
328 snprintf(res
->ring_name
[0], MAX_NAME_LEN
, "%s%u",
329 RING_MASTER_NAME
, slaveid
);
330 if ((res
->ring
[0] = create_ring(res
->ring_name
[0], NB_CORE_MSGBUF
,
331 socketid
, flag
)) == NULL
) {
332 printf("Create m2s ring %s failed\n", res
->ring_name
[0]);
336 snprintf(res
->ring_name
[1], MAX_NAME_LEN
, "%s%u",
337 RING_SLAVE_NAME
, slaveid
);
338 if ((res
->ring
[1] = create_ring(res
->ring_name
[1], NB_CORE_MSGBUF
,
339 socketid
, flag
)) == NULL
) {
340 printf("Create s2m ring %s failed\n", res
->ring_name
[1]);
347 /* send command to pair in paired master and slave ring */
349 sendcmd(unsigned slaveid
, enum l2fwd_cmd cmd
, int is_master
)
351 struct lcore_resource_struct
*res
= &lcore_resource
[slaveid
];
355 /* Only check master, it must be enabled and running if it is slave */
356 if (is_master
&& !res
->enabled
)
359 if (res
->ring
[fd
] == NULL
)
362 if (rte_mempool_get(message_pool
, &msg
) < 0) {
363 printf("Error to get message buffer\n");
367 *(enum l2fwd_cmd
*)msg
= cmd
;
369 if (rte_ring_enqueue(res
->ring
[fd
], msg
) != 0) {
370 printf("Enqueue error\n");
371 rte_mempool_put(message_pool
, msg
);
378 /* Get command from pair in paired master and slave ring */
380 getcmd(unsigned slaveid
, enum l2fwd_cmd
*cmd
, int is_master
)
382 struct lcore_resource_struct
*res
= &lcore_resource
[slaveid
];
384 int fd
= !!is_master
;
386 /* Only check master, it must be enabled and running if it is slave */
387 if (is_master
&& (!res
->enabled
))
390 if (res
->ring
[fd
] == NULL
)
393 ret
= rte_ring_dequeue(res
->ring
[fd
], &msg
);
396 *cmd
= *(enum l2fwd_cmd
*)msg
;
397 rte_mempool_put(message_pool
, msg
);
402 /* Master send command to slave and wait until ack received or error met */
404 master_sendcmd_with_ack(unsigned slaveid
, enum l2fwd_cmd cmd
)
406 enum l2fwd_cmd ack_cmd
;
409 if (sendcmd(slaveid
, cmd
, 1) != 0)
410 rte_exit(EXIT_FAILURE
, "Failed to send message\n");
414 ret
= getcmd(slaveid
, &ack_cmd
, 1);
415 if (ret
== 0 && cmd
== ack_cmd
)
418 /* If slave not running yet, return an error */
419 if (flib_query_slave_status(slaveid
) != ST_RUN
) {
428 /* restart all port that assigned to that slave lcore */
430 reset_slave_all_ports(unsigned slaveid
)
432 struct lcore_resource_struct
*slave
= &lcore_resource
[slaveid
];
435 /* stop/start port */
436 for (i
= 0; i
< slave
->port_num
; i
++) {
437 char buf_name
[RTE_MEMPOOL_NAMESIZE
];
438 struct rte_mempool
*pool
;
439 printf("Stop port :%d\n", slave
->port
[i
]);
440 rte_eth_dev_stop(slave
->port
[i
]);
441 snprintf(buf_name
, RTE_MEMPOOL_NAMESIZE
, MBUF_NAME
, slave
->port
[i
]);
442 pool
= rte_mempool_lookup(buf_name
);
444 printf("Port %d mempool free object is %u(%u)\n", slave
->port
[i
],
445 rte_mempool_avail_count(pool
),
446 (unsigned int)NB_MBUF
);
448 printf("Can't find mempool %s\n", buf_name
);
450 printf("Start port :%d\n", slave
->port
[i
]);
451 ret
= rte_eth_dev_start(slave
->port
[i
]);
459 reset_shared_structures(unsigned slaveid
)
462 /* Only port are shared resource here */
463 ret
= reset_slave_all_ports(slaveid
);
469 * Call this function to re-create resource that needed for slave process that
470 * exited in last instance
473 init_slave_res(unsigned slaveid
)
475 struct lcore_resource_struct
*slave
= &lcore_resource
[slaveid
];
478 if (!slave
->enabled
) {
479 printf("Something wrong with lcore=%u enabled=%d\n",slaveid
,
484 /* Initialize ring */
485 if (create_ms_ring(slaveid
) != 0)
486 rte_exit(EXIT_FAILURE
, "failed to create ring for slave %u\n",
489 /* drain un-read buffer if have */
490 while (getcmd(slaveid
, &cmd
, 1) == 0);
491 while (getcmd(slaveid
, &cmd
, 0) == 0);
497 recreate_one_slave(unsigned slaveid
)
500 /* Re-initialize resource for stalled slave */
501 if ((ret
= init_slave_res(slaveid
)) != 0) {
502 printf("Init slave=%u failed\n", slaveid
);
506 if ((ret
= flib_remote_launch(l2fwd_launch_one_lcore
, NULL
, slaveid
))
508 printf("Launch slave %u failed\n", slaveid
);
514 * remapping resource belong to slave_id to new lcore that gets from flib_assign_lcore_id(),
515 * used only floating process option applied.
518 * original lcore_id that apply for remapping
521 remapping_slave_resource(unsigned slaveid
, unsigned map_id
)
524 /* remapping lcore_resource */
525 memcpy(&lcore_resource
[map_id
], &lcore_resource
[slaveid
],
526 sizeof(struct lcore_resource_struct
));
528 /* remapping lcore_queue_conf */
529 memcpy(&lcore_queue_conf
[map_id
], &lcore_queue_conf
[slaveid
],
530 sizeof(struct lcore_queue_conf
));
534 reset_pair(unsigned slaveid
, unsigned pairid
)
537 if ((ret
= reset_shared_structures(slaveid
)) != 0)
540 if((ret
= reset_shared_structures(pairid
)) != 0)
544 unsigned map_id
= mapping_id
[slaveid
];
546 if (map_id
!= INVALID_MAPPING_ID
) {
547 printf("%u return mapping id %u\n", slaveid
, map_id
);
548 flib_free_lcore_id(map_id
);
549 mapping_id
[slaveid
] = INVALID_MAPPING_ID
;
552 map_id
= mapping_id
[pairid
];
553 if (map_id
!= INVALID_MAPPING_ID
) {
554 printf("%u return mapping id %u\n", pairid
, map_id
);
555 flib_free_lcore_id(map_id
);
556 mapping_id
[pairid
] = INVALID_MAPPING_ID
;
560 if((ret
= recreate_one_slave(slaveid
)) != 0)
563 ret
= recreate_one_slave(pairid
);
570 slave_exit_cb(unsigned slaveid
, __attribute__((unused
))int stat
)
572 struct lcore_resource_struct
*slave
= &lcore_resource
[slaveid
];
574 printf("Get slave %u leave info\n", slaveid
);
575 if (!slave
->enabled
) {
576 printf("Lcore=%u not registered for it's exit\n", slaveid
);
579 rte_spinlock_lock(&res_lock
);
581 /* Change the state and wait master to start them */
582 slave
->flags
= SLAVE_RECREATE_FLAG
;
584 rte_spinlock_unlock(&res_lock
);
588 l2fwd_simple_forward(struct rte_mbuf
*m
, unsigned portid
)
590 struct ether_hdr
*eth
;
594 struct rte_eth_dev_tx_buffer
*buffer
;
596 dst_port
= l2fwd_dst_ports
[portid
];
597 eth
= rte_pktmbuf_mtod(m
, struct ether_hdr
*);
599 /* 02:00:00:00:00:xx */
600 tmp
= ð
->d_addr
.addr_bytes
[0];
601 *((uint64_t *)tmp
) = 0x000000000002 + ((uint64_t)dst_port
<< 40);
604 ether_addr_copy(&l2fwd_ports_eth_addr
[dst_port
], ð
->s_addr
);
606 buffer
= tx_buffer
[dst_port
];
607 sent
= rte_eth_tx_buffer(dst_port
, 0, buffer
, m
);
609 port_statistics
[dst_port
].tx
+= sent
;
612 /* main processing loop */
614 l2fwd_main_loop(void)
616 struct rte_mbuf
*pkts_burst
[MAX_PKT_BURST
];
620 uint64_t prev_tsc
, diff_tsc
, cur_tsc
;
621 unsigned i
, j
, portid
, nb_rx
;
622 struct lcore_queue_conf
*qconf
;
623 const uint64_t drain_tsc
= (rte_get_tsc_hz() + US_PER_S
- 1) / US_PER_S
*
625 struct rte_eth_dev_tx_buffer
*buffer
;
629 lcore_id
= rte_lcore_id();
631 qconf
= &lcore_queue_conf
[lcore_id
];
633 if (qconf
->n_rx_port
== 0) {
634 RTE_LOG(INFO
, L2FWD
, "lcore %u has nothing to do\n", lcore_id
);
638 RTE_LOG(INFO
, L2FWD
, "entering main loop on lcore %u\n", lcore_id
);
640 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
641 portid
= qconf
->rx_port_list
[i
];
642 RTE_LOG(INFO
, L2FWD
, " -- lcoreid=%u portid=%u\n", lcore_id
,
648 cur_tsc
= rte_rdtsc();
650 if (unlikely(getcmd(lcore_id
, &cmd
, 0) == 0)) {
651 sendcmd(lcore_id
, cmd
, 0);
653 /* If get stop command, stop forwarding and exit */
654 if (cmd
== CMD_STOP
) {
660 * TX burst queue drain
662 diff_tsc
= cur_tsc
- prev_tsc
;
663 if (unlikely(diff_tsc
> drain_tsc
)) {
665 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
667 portid
= l2fwd_dst_ports
[qconf
->rx_port_list
[i
]];
668 buffer
= tx_buffer
[portid
];
670 sent
= rte_eth_tx_buffer_flush(portid
, 0, buffer
);
672 port_statistics
[portid
].tx
+= sent
;
678 * Read packet from RX queues
680 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
682 portid
= qconf
->rx_port_list
[i
];
683 nb_rx
= rte_eth_rx_burst((uint8_t) portid
, 0,
684 pkts_burst
, MAX_PKT_BURST
);
686 port_statistics
[portid
].rx
+= nb_rx
;
688 for (j
= 0; j
< nb_rx
; j
++) {
690 rte_prefetch0(rte_pktmbuf_mtod(m
, void *));
691 l2fwd_simple_forward(m
, portid
);
698 l2fwd_launch_one_lcore(__attribute__((unused
)) void *dummy
)
700 unsigned lcore_id
= rte_lcore_id();
705 /* Change it to floating process, also change it's lcore_id */
706 clear_cpu_affinity();
707 RTE_PER_LCORE(_lcore_id
) = 0;
709 if (flib_assign_lcore_id() < 0 ) {
710 printf("flib_assign_lcore_id failed\n");
713 flcore_id
= rte_lcore_id();
714 /* Set mapping id, so master can return it after slave exited */
715 mapping_id
[lcore_id
] = flcore_id
;
716 printf("Org lcore_id = %u, cur lcore_id = %u\n",
717 lcore_id
, flcore_id
);
718 remapping_slave_resource(lcore_id
, flcore_id
);
723 /* return lcore_id before return */
725 flib_free_lcore_id(rte_lcore_id());
726 mapping_id
[lcore_id
] = INVALID_MAPPING_ID
;
733 l2fwd_usage(const char *prgname
)
735 printf("%s [EAL options] -- -p PORTMASK -s COREMASK [-q NQ] -f\n"
736 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
737 " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
738 " -f use floating process which won't bind to any core to run\n"
739 " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
744 l2fwd_parse_portmask(const char *portmask
)
749 /* parse hexadecimal string */
750 pm
= strtoul(portmask
, &end
, 16);
751 if ((portmask
[0] == '\0') || (end
== NULL
) || (*end
!= '\0'))
761 l2fwd_parse_nqueue(const char *q_arg
)
766 /* parse hexadecimal string */
767 n
= strtoul(q_arg
, &end
, 10);
768 if ((q_arg
[0] == '\0') || (end
== NULL
) || (*end
!= '\0'))
772 if (n
>= MAX_RX_QUEUE_PER_LCORE
)
779 l2fwd_parse_timer_period(const char *q_arg
)
784 /* parse number string */
785 n
= strtol(q_arg
, &end
, 10);
786 if ((q_arg
[0] == '\0') || (end
== NULL
) || (*end
!= '\0'))
788 if (n
>= MAX_TIMER_PERIOD
)
794 /* Parse the argument given in the command line of the application */
796 l2fwd_parse_args(int argc
, char **argv
)
801 char *prgname
= argv
[0];
802 static struct option lgopts
[] = {
809 while ((opt
= getopt_long(argc
, argvopt
, "p:q:T:f",
810 lgopts
, &option_index
)) != EOF
) {
815 l2fwd_enabled_port_mask
= l2fwd_parse_portmask(optarg
);
816 if (l2fwd_enabled_port_mask
== 0) {
817 printf("invalid portmask\n");
818 l2fwd_usage(prgname
);
826 l2fwd_rx_queue_per_lcore
= l2fwd_parse_nqueue(optarg
);
827 if (l2fwd_rx_queue_per_lcore
== 0) {
828 printf("invalid queue number\n");
829 l2fwd_usage(prgname
);
836 timer_period
= l2fwd_parse_timer_period(optarg
) * 1000 * TIMER_MILLISECOND
;
837 if (timer_period
< 0) {
838 printf("invalid timer period\n");
839 l2fwd_usage(prgname
);
844 /* use floating process */
851 l2fwd_usage(prgname
);
855 l2fwd_usage(prgname
);
861 argv
[optind
-1] = prgname
;
864 l2fwd_usage(prgname
);
868 optind
= 0; /* reset getopt lib */
872 /* Check the link status of all ports in up to 9s, and print them finally */
874 check_all_ports_link_status(uint8_t port_num
, uint32_t port_mask
)
876 #define CHECK_INTERVAL 100 /* 100ms */
877 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
878 uint8_t portid
, count
, all_ports_up
, print_flag
= 0;
879 struct rte_eth_link link
;
881 printf("\nChecking link status");
883 for (count
= 0; count
<= MAX_CHECK_TIME
; count
++) {
885 for (portid
= 0; portid
< port_num
; portid
++) {
886 if ((port_mask
& (1 << portid
)) == 0)
888 memset(&link
, 0, sizeof(link
));
889 rte_eth_link_get_nowait(portid
, &link
);
890 /* print link status if flag set */
891 if (print_flag
== 1) {
892 if (link
.link_status
)
893 printf("Port %d Link Up - speed %u "
894 "Mbps - %s\n", (uint8_t)portid
,
895 (unsigned)link
.link_speed
,
896 (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
) ?
897 ("full-duplex") : ("half-duplex\n"));
899 printf("Port %d Link Down\n",
903 /* clear all_ports_up flag if any link down */
904 if (link
.link_status
== ETH_LINK_DOWN
) {
909 /* after finally printing all link status, get out */
913 if (all_ports_up
== 0) {
916 rte_delay_ms(CHECK_INTERVAL
);
919 /* set the print_flag if all ports up or timeout */
920 if (all_ports_up
== 1 || count
== (MAX_CHECK_TIME
- 1)) {
928 main(int argc
, char **argv
)
930 struct lcore_queue_conf
*qconf
;
931 struct rte_eth_dev_info dev_info
;
934 uint8_t nb_ports_available
;
935 uint8_t portid
, last_port
;
936 unsigned rx_lcore_id
;
937 unsigned nb_ports_in_mask
= 0;
940 uint64_t prev_tsc
, diff_tsc
, cur_tsc
, timer_tsc
;
942 /* Save cpu_affinity first, restore it in case it's floating process option */
943 if (get_cpu_affinity() != 0)
944 rte_exit(EXIT_FAILURE
, "get_cpu_affinity error\n");
946 /* Also tries to set cpu affinity to detect whether it will fail in child process */
947 if(clear_cpu_affinity() != 0)
948 rte_exit(EXIT_FAILURE
, "clear_cpu_affinity error\n");
951 ret
= rte_eal_init(argc
, argv
);
953 rte_exit(EXIT_FAILURE
, "Invalid EAL arguments\n");
957 /* parse application arguments (after the EAL ones) */
958 ret
= l2fwd_parse_args(argc
, argv
);
960 rte_exit(EXIT_FAILURE
, "Invalid L2FWD arguments\n");
963 if (flib_init() != 0)
964 rte_exit(EXIT_FAILURE
, "flib init error");
967 * Allocated structures that slave lcore would change. For those that slaves are
968 * read only, needn't use malloc to share and global or static variables is ok since
969 * slave inherit all the knowledge that master initialized.
971 if (l2fwd_malloc_shared_struct() != 0)
972 rte_exit(EXIT_FAILURE
, "malloc mem failed\n");
974 /* Initialize lcore_resource structures */
975 memset(lcore_resource
, 0, sizeof(lcore_resource
));
976 for (i
= 0; i
< RTE_MAX_LCORE
; i
++)
977 lcore_resource
[i
].lcore_id
= i
;
979 nb_ports
= rte_eth_dev_count();
981 rte_exit(EXIT_FAILURE
, "No Ethernet ports - bye\n");
983 /* create the mbuf pool */
984 for (portid
= 0; portid
< nb_ports
; portid
++) {
985 /* skip ports that are not enabled */
986 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0)
988 char buf_name
[RTE_MEMPOOL_NAMESIZE
];
989 flags
= MEMPOOL_F_SP_PUT
| MEMPOOL_F_SC_GET
;
990 snprintf(buf_name
, RTE_MEMPOOL_NAMESIZE
, MBUF_NAME
, portid
);
991 l2fwd_pktmbuf_pool
[portid
] =
992 rte_mempool_create(buf_name
, NB_MBUF
,
994 sizeof(struct rte_pktmbuf_pool_private
),
995 rte_pktmbuf_pool_init
, NULL
,
996 rte_pktmbuf_init
, NULL
,
997 rte_socket_id(), flags
);
998 if (l2fwd_pktmbuf_pool
[portid
] == NULL
)
999 rte_exit(EXIT_FAILURE
, "Cannot init mbuf pool\n");
1001 printf("Create mbuf %s\n", buf_name
);
1004 /* reset l2fwd_dst_ports */
1005 for (portid
= 0; portid
< RTE_MAX_ETHPORTS
; portid
++)
1006 l2fwd_dst_ports
[portid
] = 0;
1010 * Each logical core is assigned a dedicated TX queue on each port.
1012 for (portid
= 0; portid
< nb_ports
; portid
++) {
1013 /* skip ports that are not enabled */
1014 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0)
1017 if (nb_ports_in_mask
% 2) {
1018 l2fwd_dst_ports
[portid
] = last_port
;
1019 l2fwd_dst_ports
[last_port
] = portid
;
1026 rte_eth_dev_info_get(portid
, &dev_info
);
1028 if (nb_ports_in_mask
% 2) {
1029 printf("Notice: odd number of ports in portmask.\n");
1030 l2fwd_dst_ports
[last_port
] = last_port
;
1036 /* Initialize the port/queue configuration of each logical core */
1037 for (portid
= 0; portid
< nb_ports
; portid
++) {
1038 struct lcore_resource_struct
*res
;
1039 /* skip ports that are not enabled */
1040 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0)
1043 /* get the lcore_id for this port */
1044 /* skip master lcore */
1045 while (rte_lcore_is_enabled(rx_lcore_id
) == 0 ||
1046 rte_get_master_lcore() == rx_lcore_id
||
1047 lcore_queue_conf
[rx_lcore_id
].n_rx_port
==
1048 l2fwd_rx_queue_per_lcore
) {
1051 if (rx_lcore_id
>= RTE_MAX_LCORE
)
1052 rte_exit(EXIT_FAILURE
, "Not enough cores\n");
1055 if (qconf
!= &lcore_queue_conf
[rx_lcore_id
])
1056 /* Assigned a new logical core in the loop above. */
1057 qconf
= &lcore_queue_conf
[rx_lcore_id
];
1059 qconf
->rx_port_list
[qconf
->n_rx_port
] = portid
;
1062 /* Save the port resource info into lcore_resource strucutres */
1063 res
= &lcore_resource
[rx_lcore_id
];
1065 res
->port
[res
->port_num
++] = portid
;
1067 printf("Lcore %u: RX port %u\n", rx_lcore_id
, (unsigned) portid
);
1070 nb_ports_available
= nb_ports
;
1072 /* Initialise each port */
1073 for (portid
= 0; portid
< nb_ports
; portid
++) {
1074 /* skip ports that are not enabled */
1075 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0) {
1076 printf("Skipping disabled port %u\n", (unsigned) portid
);
1077 nb_ports_available
--;
1081 printf("Initializing port %u... ", (unsigned) portid
);
1083 ret
= rte_eth_dev_configure(portid
, 1, 1, &port_conf
);
1085 rte_exit(EXIT_FAILURE
, "Cannot configure device: err=%d, port=%u\n",
1086 ret
, (unsigned) portid
);
1088 rte_eth_macaddr_get(portid
,&l2fwd_ports_eth_addr
[portid
]);
1090 /* init one RX queue */
1092 ret
= rte_eth_rx_queue_setup(portid
, 0, nb_rxd
,
1093 rte_eth_dev_socket_id(portid
),
1095 l2fwd_pktmbuf_pool
[portid
]);
1097 rte_exit(EXIT_FAILURE
, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
1098 ret
, (unsigned) portid
);
1100 /* init one TX queue on each port */
1102 ret
= rte_eth_tx_queue_setup(portid
, 0, nb_txd
,
1103 rte_eth_dev_socket_id(portid
),
1106 rte_exit(EXIT_FAILURE
, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
1107 ret
, (unsigned) portid
);
1109 /* Initialize TX buffers */
1110 tx_buffer
[portid
] = rte_zmalloc_socket("tx_buffer",
1111 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST
), 0,
1112 rte_eth_dev_socket_id(portid
));
1113 if (tx_buffer
[portid
] == NULL
)
1114 rte_exit(EXIT_FAILURE
, "Cannot allocate buffer for tx on port %u\n",
1117 rte_eth_tx_buffer_init(tx_buffer
[portid
], MAX_PKT_BURST
);
1119 ret
= rte_eth_tx_buffer_set_err_callback(tx_buffer
[portid
],
1120 rte_eth_tx_buffer_count_callback
,
1121 &port_statistics
[portid
].dropped
);
1123 rte_exit(EXIT_FAILURE
, "Cannot set error callback for "
1124 "tx buffer on port %u\n", (unsigned) portid
);
1127 ret
= rte_eth_dev_start(portid
);
1129 rte_exit(EXIT_FAILURE
, "rte_eth_dev_start:err=%d, port=%u\n",
1130 ret
, (unsigned) portid
);
1134 rte_eth_promiscuous_enable(portid
);
1136 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
1138 l2fwd_ports_eth_addr
[portid
].addr_bytes
[0],
1139 l2fwd_ports_eth_addr
[portid
].addr_bytes
[1],
1140 l2fwd_ports_eth_addr
[portid
].addr_bytes
[2],
1141 l2fwd_ports_eth_addr
[portid
].addr_bytes
[3],
1142 l2fwd_ports_eth_addr
[portid
].addr_bytes
[4],
1143 l2fwd_ports_eth_addr
[portid
].addr_bytes
[5]);
1145 /* initialize port stats */
1146 //memset(&port_statistics, 0, sizeof(port_statistics));
1149 if (!nb_ports_available
) {
1150 rte_exit(EXIT_FAILURE
,
1151 "All available ports are disabled. Please set portmask.\n");
1154 check_all_ports_link_status(nb_ports
, l2fwd_enabled_port_mask
);
1156 /* Record pair lcore */
1158 * Since l2fwd example would create pair between different neighbour port, that's
1159 * port 0 receive and forward to port 1, the same to port 1, these 2 ports will have
1160 * dependency. If one port stopped working (killed, for example), the port need to
1161 * be stopped/started again. During the time, another port need to wait until stop/start
1162 * procedure completed. So, record the pair relationship for those lcores working
1165 for (portid
= 0; portid
< nb_ports
; portid
++) {
1167 unsigned lcore
= 0, pair_lcore
= 0;
1168 unsigned j
, find_lcore
, find_pair_lcore
;
1169 /* skip ports that are not enabled */
1170 if ((l2fwd_enabled_port_mask
& (1 << portid
)) == 0)
1173 /* Find pair ports' lcores */
1174 find_lcore
= find_pair_lcore
= 0;
1175 pair_port
= l2fwd_dst_ports
[portid
];
1176 for (i
= 0; i
< RTE_MAX_LCORE
; i
++) {
1177 if (!rte_lcore_is_enabled(i
))
1179 for (j
= 0; j
< lcore_queue_conf
[i
].n_rx_port
;j
++) {
1180 if (lcore_queue_conf
[i
].rx_port_list
[j
] == portid
) {
1185 if (lcore_queue_conf
[i
].rx_port_list
[j
] == pair_port
) {
1187 find_pair_lcore
= 1;
1191 if (find_lcore
&& find_pair_lcore
)
1194 if (!find_lcore
|| !find_pair_lcore
)
1195 rte_exit(EXIT_FAILURE
, "Not find port=%d pair\n", portid
);
1197 printf("lcore %u and %u paired\n", lcore
, pair_lcore
);
1198 lcore_resource
[lcore
].pair_id
= pair_lcore
;
1199 lcore_resource
[pair_lcore
].pair_id
= lcore
;
1202 /* Create message buffer for all master and slave */
1203 message_pool
= rte_mempool_create("ms_msg_pool",
1204 NB_CORE_MSGBUF
* RTE_MAX_LCORE
,
1205 sizeof(enum l2fwd_cmd
), NB_CORE_MSGBUF
/ 2,
1207 rte_pktmbuf_pool_init
, NULL
,
1208 rte_pktmbuf_init
, NULL
,
1209 rte_socket_id(), 0);
1211 if (message_pool
== NULL
)
1212 rte_exit(EXIT_FAILURE
, "Create msg mempool failed\n");
1214 /* Create ring for each master and slave pair, also register cb when slave leaves */
1215 for (i
= 0; i
< RTE_MAX_LCORE
; i
++) {
1217 * Only create ring and register slave_exit cb in case that core involved into
1220 if (lcore_resource
[i
].enabled
) {
1221 /* Create ring for master and slave communication */
1222 ret
= create_ms_ring(i
);
1224 rte_exit(EXIT_FAILURE
, "Create ring for lcore=%u failed",
1227 if (flib_register_slave_exit_notify(i
,
1228 slave_exit_cb
) != 0)
1229 rte_exit(EXIT_FAILURE
,
1230 "Register master_trace_slave_exit failed");
1234 /* launch per-lcore init on every lcore except master */
1235 flib_mp_remote_launch(l2fwd_launch_one_lcore
, NULL
, SKIP_MASTER
);
1237 /* print statistics 10 second */
1238 prev_tsc
= cur_tsc
= rte_rdtsc();
1242 cur_tsc
= rte_rdtsc();
1243 diff_tsc
= cur_tsc
- prev_tsc
;
1244 /* if timer is enabled */
1245 if (timer_period
> 0) {
1247 /* advance the timer */
1248 timer_tsc
+= diff_tsc
;
1250 /* if timer has reached its timeout */
1251 if (unlikely(timer_tsc
>= (uint64_t) timer_period
)) {
1254 /* reset the timer */
1261 /* Check any slave need restart or recreate */
1262 rte_spinlock_lock(&res_lock
);
1263 for (i
= 0; i
< RTE_MAX_LCORE
; i
++) {
1264 struct lcore_resource_struct
*res
= &lcore_resource
[i
];
1265 struct lcore_resource_struct
*pair
= &lcore_resource
[res
->pair_id
];
1267 /* If find slave exited, try to reset pair */
1268 if (res
->enabled
&& res
->flags
&& pair
->enabled
) {
1270 master_sendcmd_with_ack(pair
->lcore_id
, CMD_STOP
);
1271 rte_spinlock_unlock(&res_lock
);
1273 rte_spinlock_lock(&res_lock
);
1277 if (reset_pair(res
->lcore_id
, pair
->lcore_id
) != 0)
1278 rte_exit(EXIT_FAILURE
, "failed to reset slave");
1283 rte_spinlock_unlock(&res_lock
);