/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <signal.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <limits.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_timer.h>
#include <rte_power.h>
#include <rte_spinlock.h>
#define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1

#define MAX_PKT_BURST 32

#define MIN_ZERO_POLL_COUNT 10

/* around 100ms at 2 GHz */
#define TIMER_RESOLUTION_CYCLES 200000000ULL
/* 100 ms interval */
#define TIMER_NUMBER_PER_SECOND 10
/* 100000 us */
#define SCALING_PERIOD (1000000/TIMER_NUMBER_PER_SECOND)
#define SCALING_DOWN_TIME_RATIO_THRESHOLD 0.25

#define APP_LOOKUP_EXACT_MATCH 0
#define APP_LOOKUP_LPM 1
#define DO_RFC_1812_CHECKS

#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD APP_LOOKUP_LPM
#endif
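/*
 * Timer math sanity check (informational): the power timer fires
 * TIMER_NUMBER_PER_SECOND (10) times per second, so one scaling window is
 * SCALING_PERIOD = 1000000/10 = 100000 us (100 ms). With the 0.25 ratio
 * threshold, a core that slept for at least 25000 us within one 100 ms
 * window is considered under-utilized and is scaled down one step.
 */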
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0],  addr[1],  addr[2],  addr[3], \
	addr[4],  addr[5],  addr[6],  addr[7], \
	addr[8],  addr[9],  addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]

#define MAX_JUMBO_PKT_LEN 9600

#define IPV6_ADDR_LEN 16

#define MEMPOOL_CACHE_SIZE 256
/*
 * This expression is used to calculate the number of mbufs needed depending on
 * user input, taking into account memory for rx and tx hardware rings, cache
 * per lcore and mtable per port per lcore. RTE_MAX is used to ensure that
 * NB_MBUF never goes below a minimum value of 8192.
 */
#define NB_MBUF RTE_MAX( \
	(nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
	nb_ports*nb_lcores*MAX_PKT_BURST + \
	nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
	nb_lcores*MEMPOOL_CACHE_SIZE), \
	(unsigned)8192)
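/*
 * Worked example (informational): with 2 ports, 1 rx queue per port,
 * 2 lcores and 2 tx queues per port, the sum is
 * 2*1*128 + 2*2*32 + 2*2*512 + 2*256 = 256 + 128 + 2048 + 512 = 2944,
 * which is below the floor, so RTE_MAX selects 8192 mbufs.
 */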
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define NB_SOCKETS 8

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* spinlocks, one per port, serializing rx interrupt enable/disable */
static rte_spinlock_t locks[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
/* Ports set in promiscuous mode off by default. */
static int promiscuous_on = 0;
/* NUMA is enabled by default. */
static int numa_on = 1;
enum freq_scale_hint_t
{
	FREQ_LOWER   = -1,
	FREQ_CURRENT =  0,
	FREQ_HIGHER  =  1,
	FREQ_HIGHEST =  2
};

struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
	enum freq_scale_hint_t freq_up_hint;
	uint32_t zero_rx_packet_count;
	uint32_t idle_hint;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf  = ETH_RSS_UDP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.intr_conf = {
		.rxq = 1,
	},
};

static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
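/*
 * Note on the port configuration above: ETH_MQ_RX_RSS spreads incoming
 * flows across the configured rx queues by hashing UDP/IP header fields
 * (ETH_RSS_UDP), so each polling lcore sees a stable subset of flows. The
 * .rx_adv_conf/.txmode/.intr_conf nesting is restored here from the
 * standard struct rte_eth_conf layout; .rxq = 1 requests per-queue rx
 * interrupts, which the idle path below relies on to sleep between bursts.
 */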
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));
struct ipv4_l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
	{{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
};

static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{
		{
			{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
			{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x02, 0x1e, 0x67, 0xff, 0xfe, 0x0d, 0xb6, 0x0a},
			1, 10, IPPROTO_UDP
		}, 4,
	},
};
typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];

#define L3FWD_HASH_ENTRIES 1024

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV4_L3FWD_LPM_MAX_RULES 1024

typedef struct rte_lpm lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
#endif
struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t n_tx_port;
	uint16_t tx_port_id[RTE_MAX_ETHPORTS];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
	lookup_struct_t *ipv4_lookup_struct;
	lookup_struct_t *ipv6_lookup_struct;
} __rte_cache_aligned;
struct lcore_stats {
	/* total sleep time in ms since last frequency scaling down */
	uint32_t sleep_time;
	/* number of long sleeps recently */
	uint32_t nb_long_sleep;
	/* freq. scaling up trend */
	uint32_t trend;
	/* total packets processed recently */
	uint64_t nb_rx_processed;
	/* total iterations looped recently */
	uint64_t nb_iteration_looped;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE] __rte_cache_aligned;
static struct lcore_stats stats[RTE_MAX_LCORE] __rte_cache_aligned;
static struct rte_timer power_timers[RTE_MAX_LCORE];
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic(
		unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
/* exit signal handler */
static void
signal_exit_now(int sigtype)
{
	unsigned lcore_id;
	int ret;

	if (sigtype == SIGINT) {
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			/* de-initialize the power management library */
			ret = rte_power_exit(lcore_id);
			if (ret)
				rte_exit(EXIT_FAILURE, "Power management "
					"library de-initialization failed on "
					"core%u\n", lcore_id);
		}
	}

	rte_exit(EXIT_SUCCESS, "User forced exit\n");
}
/* Frequency scale down timer callback */
static void
power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
		__attribute__((unused)) void *arg)
{
	uint64_t hz;
	float sleep_time_ratio;
	unsigned lcore_id = rte_lcore_id();

	/* accumulate total execution time in us when callback is invoked */
	sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
				(float)SCALING_PERIOD;

	/*
	 * check whether the core needs to scale down frequency a step
	 * if it sleeps a lot.
	 */
	if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
		if (rte_power_freq_down)
			rte_power_freq_down(lcore_id);
	}
	else if ((unsigned)(stats[lcore_id].nb_rx_processed /
		stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
		/*
		 * scale down a step if the average number of packets per
		 * iteration is below expectation.
		 */
		if (rte_power_freq_down)
			rte_power_freq_down(lcore_id);
	}

	/*
	 * initialize another timer according to current frequency to ensure
	 * the timer interval is relatively fixed.
	 */
	hz = rte_get_timer_hz();
	rte_timer_reset(&power_timers[lcore_id], hz/TIMER_NUMBER_PER_SECOND,
				SINGLE, lcore_id, power_timer_cb, NULL);

	stats[lcore_id].nb_rx_processed = 0;
	stats[lcore_id].nb_iteration_looped = 0;

	stats[lcore_id].sleep_time = 0;
}
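/*
 * Scale-down policy in one place (informational): this callback runs 10
 * times per second on each lcore. A core is stepped down one frequency
 * gear when it either spent >= 25% of the last window sleeping, or
 * averaged fewer than MAX_PKT_BURST (32) packets per poll iteration.
 * rte_power_freq_down is a function pointer filled in by rte_power_init(),
 * hence the NULL check before each call.
 */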
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
			qconf->tx_buffer[port], m);

	return 0;
}
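/*
 * Note: rte_eth_tx_buffer() only queues the mbuf into the per-(lcore,port)
 * software buffer; the actual rte_eth_tx_burst() happens once the buffer
 * reaches its configured size (MAX_PKT_BURST here). Packets left in a
 * partially filled buffer are pushed out by the periodic
 * rte_eth_tx_buffer_flush() drain in main_loop(), so nothing is stranded.
 */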
#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;
	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
print_ipv4_key(struct ipv4_5tuple key)
{
	printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, "
		"proto = %d\n", (unsigned)key.ip_dst, (unsigned)key.ip_src,
				key.port_dst, key.port_src, key.proto);
}

static void
print_ipv6_key(struct ipv6_5tuple key)
{
	printf("IP dst = " IPv6_BYTES_FMT ", IP src = " IPv6_BYTES_FMT ", "
		"port dst = %d, port src = %d, proto = %d\n",
		IPv6_BYTES(key.ip_dst), IPv6_BYTES(key.ip_src),
		key.port_dst, key.port_src, key.proto);
}
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
		lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	struct ipv4_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
	key.proto = ipv4_hdr->next_proto_id;

	switch (ipv4_hdr->next_proto_id) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
		lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
	struct ipv6_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
	memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);

	key.proto = ipv6_hdr->proto;

	switch (ipv6_hdr->proto) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
					sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
					sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
		lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	uint32_t next_hop;

	return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
			rte_be_to_cpu_32(ipv4_hdr->dst_addr),
			&next_hop) == 0) ? next_hop : portid);
}
#endif
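/*
 * rte_lpm_lookup() returns 0 on a hit and fills next_hop with the if_out
 * value stored for the longest matching prefix; on a miss the packet is
 * bounced back out of the port it arrived on (portid), mirroring the hash
 * lookup fallback above.
 */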
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
				struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *d_addr_bytes;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		/* Handle IPv4 headers.*/
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
		/* Check to make sure the packet is valid (RFC1812) */
		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
			rte_pktmbuf_free(m);
			return;
		}
#endif

		dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
					qconf->ipv4_lookup_struct);
		if (dst_port >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) =
			0x000000000002 + ((uint64_t)dst_port << 40);

#ifdef DO_RFC_1812_CHECKS
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
#endif

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
		/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
		struct ipv6_hdr *ipv6_hdr;

		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
						sizeof(struct ether_hdr));

		dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
					qconf->ipv6_lookup_struct);

		if (dst_port >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) =
			0x000000000002 + ((uint64_t)dst_port << 40);

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
#else
		/* We don't currently handle IPv6 packets in LPM mode. */
		rte_pktmbuf_free(m);
#endif
	} else
		rte_pktmbuf_free(m);
}
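/*
 * About the destination MAC write above (informational): the app overwrites
 * the destination address with the locally administered pattern
 * 02:00:00:00:00:xx, where xx is the output port. On a little-endian CPU,
 * storing the 64-bit value 0x000000000002 + (dst_port << 40) places 0x02 in
 * byte 0 and dst_port in byte 5 of d_addr. The store also clobbers the
 * first two bytes of s_addr, which is harmless because ether_addr_copy()
 * immediately rewrites the source address afterwards.
 */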
#define MINIMUM_SLEEP_TIME 1
#define SUSPEND_THRESHOLD 300

static inline uint32_t
power_idle_heuristic(uint32_t zero_rx_packet_count)
{
	/* If the zero poll count is still below the suspend threshold,
	 * just pause briefly (1 us).
	 */
	if (zero_rx_packet_count < SUSPEND_THRESHOLD)
		return MINIMUM_SLEEP_TIME;
	/* Otherwise ask for a long sleep; returning the threshold value
	 * is the hint that lets the main loop suspend the core until an
	 * rx interrupt arrives.
	 */
	else
		return SUSPEND_THRESHOLD;
}
static inline enum freq_scale_hint_t
power_freq_scaleup_heuristic(unsigned lcore_id,
			     uint8_t port_id,
			     uint16_t queue_id)
{
/**
 * HW Rx queue size is 128 by default, Rx burst read at maximum 32 entries
 * per iteration
 */
#define FREQ_GEAR1_RX_PACKET_THRESHOLD	MAX_PKT_BURST
#define FREQ_GEAR2_RX_PACKET_THRESHOLD	(MAX_PKT_BURST*2)
#define FREQ_GEAR3_RX_PACKET_THRESHOLD	(MAX_PKT_BURST*3)
#define FREQ_UP_TREND1_ACC	1
#define FREQ_UP_TREND2_ACC	100
#define FREQ_UP_THRESHOLD	10000

	if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)) {
		stats[lcore_id].trend = 0;
		return FREQ_HIGHEST;
	} else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR2_RX_PACKET_THRESHOLD) > 0))
		stats[lcore_id].trend += FREQ_UP_TREND2_ACC;
	else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR1_RX_PACKET_THRESHOLD) > 0))
		stats[lcore_id].trend += FREQ_UP_TREND1_ACC;

	if (likely(stats[lcore_id].trend > FREQ_UP_THRESHOLD)) {
		stats[lcore_id].trend = 0;
		return FREQ_HIGHER;
	}

	return FREQ_CURRENT;
}
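/*
 * How the gears work (informational): rte_eth_rx_descriptor_done() probes
 * whether at least N descriptors are already filled in the rx ring. A
 * backlog of 96+ descriptors (3 bursts) jumps straight to the highest
 * frequency; 64+ adds 100 to the trend counter and 32+ adds 1, so with
 * FREQ_UP_THRESHOLD at 10000 it takes roughly 100 consecutive medium-load
 * polls, or 10000 light-load polls, before a single one-step scale-up.
 */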
/**
 * force polling thread sleep until one-shot rx interrupt triggers
 * @param num
 *  number of rx queues to wait on
 * @return
 *  0 on success
 */
static int
sleep_until_rx_interrupt(int num)
{
	struct rte_epoll_event event[num];
	int n, i;
	uint8_t port_id, queue_id;
	void *data;

	RTE_LOG(INFO, L3FWD_POWER,
		"lcore %u sleeps until interrupt triggers\n",
		rte_lcore_id());

	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, -1);
	for (i = 0; i < n; i++) {
		data = event[i].epdata.data;
		port_id = ((uintptr_t)data) >> CHAR_BIT;
		queue_id = ((uintptr_t)data) &
			RTE_LEN2MASK(CHAR_BIT, uint8_t);
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
		RTE_LOG(INFO, L3FWD_POWER,
			"lcore %u is woken up from rx interrupt on"
			" port %d queue %d\n",
			rte_lcore_id(), port_id, queue_id);
	}

	return 0;
}
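/*
 * Epoll payload encoding (informational): event_register() packs the port
 * in the bits above CHAR_BIT and the queue in the low byte, so e.g. port 1
 * queue 2 is stored as (1 << 8) | 2 = 0x102. The shift and mask above
 * simply undo that packing when the interrupt fires.
 */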
static void turn_on_intr(struct lcore_conf *qconf)
{
	int i;
	struct lcore_rx_queue *rx_queue;
	uint8_t port_id, queue_id;

	for (i = 0; i < qconf->n_rx_queue; ++i) {
		rx_queue = &(qconf->rx_queue_list[i]);
		port_id = rx_queue->port_id;
		queue_id = rx_queue->queue_id;

		rte_spinlock_lock(&(locks[port_id]));
		rte_eth_dev_rx_intr_enable(port_id, queue_id);
		rte_spinlock_unlock(&(locks[port_id]));
	}
}
static int event_register(struct lcore_conf *qconf)
{
	struct lcore_rx_queue *rx_queue;
	uint8_t portid, queueid;
	uint32_t data;
	int ret;
	int i;

	for (i = 0; i < qconf->n_rx_queue; ++i) {
		rx_queue = &(qconf->rx_queue_list[i]);
		portid = rx_queue->port_id;
		queueid = rx_queue->queue_id;
		data = portid << CHAR_BIT | queueid;

		ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
						RTE_EPOLL_PER_THREAD,
						RTE_INTR_EVENT_ADD,
						(void *)((uintptr_t)data));
		if (ret)
			return ret;
	}

	return 0;
}
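/*
 * Note: rte_eth_dev_rx_intr_ctl_q() with RTE_INTR_EVENT_ADD attaches each
 * rx queue's interrupt to this thread's epoll instance
 * (RTE_EPOLL_PER_THREAD). The first failing queue aborts registration, and
 * main_loop() then runs in pure polling mode with intr_en left at 0.
 */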
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
	int i, j, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	struct lcore_rx_queue *rx_queue;
	enum freq_scale_hint_t lcore_scaleup_hint;
	uint32_t lcore_rx_idle_count = 0;
	uint32_t lcore_idle_hint = 0;
	int intr_en = 0;

	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
					US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n",
			lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n",
		lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
			"rxqueueid=%hhu\n", lcore_id, portid, queueid);
	}

	/* add into event wait list */
	if (event_register(qconf) == 0)
		intr_en = 1;
	else
		RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't be enabled.\n");

	while (1) {
		stats[lcore_id].nb_iteration_looped++;

		cur_tsc = rte_rdtsc();
		cur_tsc_power = cur_tsc;

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				rte_eth_tx_buffer_flush(portid,
						qconf->tx_queue_id[portid],
						qconf->tx_buffer[portid]);
			}
			prev_tsc = cur_tsc;
		}

		diff_tsc_power = cur_tsc_power - prev_tsc_power;
		if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
			rte_timer_manage();
			prev_tsc_power = cur_tsc_power;
		}

start_rx:
		/*
		 * Read packet from RX queues
		 */
		lcore_scaleup_hint = FREQ_CURRENT;
		lcore_rx_idle_count = 0;
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			rx_queue = &(qconf->rx_queue_list[i]);
			rx_queue->idle_hint = 0;
			portid = rx_queue->port_id;
			queueid = rx_queue->queue_id;

			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
								MAX_PKT_BURST);

			stats[lcore_id].nb_rx_processed += nb_rx;
			if (unlikely(nb_rx == 0)) {
				/*
				 * no packet received from rx queue, try to
				 * sleep for a while forcing CPU enter deeper
				 * C states.
				 */
				rx_queue->zero_rx_packet_count++;

				if (rx_queue->zero_rx_packet_count <=
							MIN_ZERO_POLL_COUNT)
					continue;

				rx_queue->idle_hint = power_idle_heuristic(
					rx_queue->zero_rx_packet_count);
				lcore_rx_idle_count++;
			} else {
				rx_queue->zero_rx_packet_count = 0;

				/*
				 * do not scale up frequency immediately as
				 * user to kernel space communication is costly
				 * which might impact packet I/O for received
				 * packets.
				 */
				rx_queue->freq_up_hint =
					power_freq_scaleup_heuristic(lcore_id,
							portid, queueid);
			}

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid,
								qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid,
								qconf);
			}
		}

		if (likely(lcore_rx_idle_count != qconf->n_rx_queue)) {
			for (i = 1, lcore_scaleup_hint =
				qconf->rx_queue_list[0].freq_up_hint;
					i < qconf->n_rx_queue; ++i) {
				rx_queue = &(qconf->rx_queue_list[i]);
				if (rx_queue->freq_up_hint >
						lcore_scaleup_hint)
					lcore_scaleup_hint =
						rx_queue->freq_up_hint;
			}

			if (lcore_scaleup_hint == FREQ_HIGHEST) {
				if (rte_power_freq_max)
					rte_power_freq_max(lcore_id);
			} else if (lcore_scaleup_hint == FREQ_HIGHER) {
				if (rte_power_freq_up)
					rte_power_freq_up(lcore_id);
			}
		} else {
			/*
			 * All Rx queues empty in recent consecutive polls,
			 * sleep in a conservative manner, meaning sleep as
			 * little as possible.
			 */
			for (i = 1, lcore_idle_hint =
				qconf->rx_queue_list[0].idle_hint;
					i < qconf->n_rx_queue; ++i) {
				rx_queue = &(qconf->rx_queue_list[i]);
				if (rx_queue->idle_hint < lcore_idle_hint)
					lcore_idle_hint = rx_queue->idle_hint;
			}

			if (lcore_idle_hint < SUSPEND_THRESHOLD)
				/*
				 * execute "pause" instruction to avoid a
				 * context switch, which generally takes
				 * hundreds of microseconds, for short sleeps.
				 */
				rte_delay_us(lcore_idle_hint);
			else {
				/* suspend until rx interrupt triggers */
				if (intr_en) {
					turn_on_intr(qconf);
					sleep_until_rx_interrupt(
						qconf->n_rx_queue);
				}
				/* start receiving packets immediately */
				goto start_rx;
			}
			stats[lcore_id].sleep_time += lcore_idle_hint;
		}
	}
}
static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	uint16_t i;
	int socketid;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore "
							"mask\n", lcore);
			return -1;
		}
		if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
							(numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa "
						"off\n", lcore, socketid);
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n",
								portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n",
								portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
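/*
 * Assuming queue ids for a port are contiguous from 0 (which the --config
 * option expects), the number of rx queues is simply the highest queue id
 * seen for that port plus one; with no matching entries it returns 0.
 */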
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		} else {
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
				lcore_params[i].port_id;
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
				lcore_params[i].queue_id;
			lcore_conf[lcore].n_rx_queue++;
		}
	}
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P"
		" [--config (port,queue,lcore)[,(port,queue,lcore)]]"
		" [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -P: enable promiscuous mode\n"
		" --config (port,queue,lcore): rx queue configuration\n"
		" --no-numa: optional, disable numa awareness\n"
		" --enable-jumbo: enable jumbo frames; the maximum packet "
		"length (64-9600, decimal) may be set with --max-pkt-len\n",
		prgname);
}
static int parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		if ((p0 = strchr(p, ')')) == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
								_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] >
									255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;

	return 0;
}
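/*
 * Example (informational): --config "(0,0,2),(1,0,2)" fills
 * lcore_params_array with two entries, steering port 0/queue 0 and
 * port 1/queue 0 to lcore 2, and leaves nb_lcore_params at 2.
 */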
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-numa", 0, 0, 0},
		{"enable-jumbo", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:P",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name, "config", 6)) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}

			if (!strncmp(lgopts[option_index].name,
						"no-numa", 7)) {
				printf("numa is disabled\n");
				numa_on = 0;
			}

			if (!strncmp(lgopts[option_index].name,
					"enable-jumbo", 12)) {
				struct option lenopts =
					{"max-pkt-len", required_argument,
							0, 0};

				printf("jumbo frame is enabled\n");
				port_conf.rxmode.jumbo_frame = 1;

				/*
				 * if no max-pkt-len set, use the default
				 * value
				 */
				if (0 == getopt_long(argc, argvopt, "",
						&lenopts, &option_index)) {
					ret = parse_max_pkt_len(optarg);
					if ((ret < 64) ||
						(ret > MAX_JUMBO_PKT_LEN)) {
						printf("invalid packet "
								"length\n");
						print_usage(prgname);
						return -1;
					}
					port_conf.rxmode.max_rx_pkt_len = ret;
				}
				printf("set jumbo frame "
					"max packet length to %u\n",
				(unsigned int)port_conf.rxmode.max_rx_pkt_len);
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
setup_hash(int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(struct ipv4_5tuple),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(struct ipv6_5tuple),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
	};

	unsigned i;
	int ret;
	char s[64];

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_lookup_struct[socketid] =
		rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
				"socket %d\n", socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_lookup_struct[socketid] =
		rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
				"socket %d\n", socketid);

	/* populate the ipv4 hash */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv4_l3fwd_lookup_struct[socketid],
				(void *) &ipv4_l3fwd_route_array[i].key);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		ipv4_l3fwd_out_if[ret] = ipv4_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv4_key(ipv4_l3fwd_route_array[i].key);
	}

	/* populate the ipv6 hash */
	for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv6_l3fwd_lookup_struct[socketid],
				(void *) &ipv6_l3fwd_route_array[i].key);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		ipv6_l3fwd_out_if[ret] = ipv6_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv6_key(ipv6_l3fwd_route_array[i].key);
	}
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
	unsigned i;
	int ret;
	char s[64];

	/* create the LPM table */
	struct rte_lpm_config lpm_ipv4_config;

	lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	lpm_ipv4_config.number_tbl8s = 256;
	lpm_ipv4_config.flags = 0;

	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lookup_struct[socketid] =
		rte_lpm_create(s, socketid, &lpm_ipv4_config);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
				" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route 0x%08x / %d (%d)\n",
			(unsigned)ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);
	}
}
#endif
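/*
 * With the /24 routes installed above, a lookup is a pure longest-prefix
 * match: e.g. a packet for 2.1.1.57 falls inside 2.1.1.0/24 and is
 * forwarded out port 1, while anything matching no rule keeps its
 * original rx port (see get_ipv4_dst_port()).
 */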
static int
init_mem(unsigned nb_mbuf)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is "
					"out of range %d\n", socketid,
						lcore_id, NB_SOCKETS);
		}
		if (pktmbuf_pool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_pktmbuf_pool_create(s, nb_mbuf,
					MEMPOOL_CACHE_SIZE, 0,
					RTE_MBUF_DEFAULT_BUF_SIZE,
					socketid);
			if (pktmbuf_pool[socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
								socketid);
			else
				printf("Allocated mbuf pool on socket %d\n",
								socketid);

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
			setup_lpm(socketid);
#else
			setup_hash(socketid);
#endif
		}
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
		qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
#endif
	}
	return 0;
}
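/*
 * NUMA note: one mbuf pool (and one lookup table) is created per CPU
 * socket that hosts an enabled lcore, so packet buffers and route tables
 * stay local to the cores that touch them; with --no-numa everything
 * collapses onto socket 0.
 */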
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
							(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
int
main(int argc, char **argv)
{
	struct lcore_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id;
	uint64_t hz;
	uint32_t n_tx_queue, nb_lcores;
	uint32_t dev_rxq_num, dev_txq_num;
	uint8_t portid, nb_rx_queue, queue, socketid;

	/* catch SIGINT and restore cpufreq governor to ondemand */
	signal(SIGINT, signal_exit_now);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* init RTE timer library to be used later */
	rte_timer_subsystem_init();

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_ports = rte_eth_dev_count();

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		rte_eth_dev_info_get(portid, &dev_info);
		dev_rxq_num = dev_info.max_rx_queues;
		dev_txq_num = dev_info.max_tx_queues;

		nb_rx_queue = get_port_n_rx_queues(portid);
		if (nb_rx_queue > dev_rxq_num)
			rte_exit(EXIT_FAILURE,
				"Cannot configure a non-existent rxq: "
				"port=%d\n", portid);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > dev_txq_num)
			n_tx_queue = dev_txq_num;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
					"err=%d, port=%d\n", ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init memory */
		ret = init_mem(NB_MBUF);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			/* Initialize TX buffers */
			qconf = &lcore_conf[lcore_id];
			qconf->tx_buffer[portid] =
				rte_zmalloc_socket("tx_buffer",
					RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST),
					0, rte_eth_dev_socket_id(portid));
			if (qconf->tx_buffer[portid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Can't allocate tx buffer for port %u\n",
					(unsigned)portid);

			rte_eth_tx_buffer_init(qconf->tx_buffer[portid],
							MAX_PKT_BURST);
		}

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (queueid >= dev_txq_num)
				continue;

			if (numa_on)
				socketid =
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			txconf = &dev_info.default_txconf;
			if (port_conf.rxmode.jumbo_frame)
				txconf->txq_flags = 0;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socketid, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_tx_queue_setup: err=%d, "
						"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;

			qconf->tx_port_id[qconf->n_tx_port] = portid;
			qconf->n_tx_port++;
		}
		printf("\n");
	}

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		/* init power management library */
		ret = rte_power_init(lcore_id);
		if (ret)
			RTE_LOG(ERR, POWER,
				"Library initialization failed on core %u\n",
				lcore_id);

		/* init timer structures for each enabled lcore */
		rte_timer_init(&power_timers[lcore_id]);
		hz = rte_get_timer_hz();
		rte_timer_reset(&power_timers[lcore_id],
				hz/TIMER_NUMBER_PER_SECOND, SINGLE, lcore_id,
						power_timer_cb, NULL);

		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);

		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid =
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
						socketid, NULL,
						pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
						"port=%d\n", ret, portid);
		}
	}

	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, "
						"port=%d\n", ret, portid);
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
		/* initialize spinlock for each port */
		rte_spinlock_init(&(locks[portid]));
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}