ceph/src/dpdk/examples/l3fwd-power/main.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <stdint.h>
37 #include <inttypes.h>
38 #include <sys/types.h>
39 #include <string.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42 #include <errno.h>
43 #include <getopt.h>
44 #include <unistd.h>
45 #include <signal.h>
46
47 #include <rte_common.h>
48 #include <rte_byteorder.h>
49 #include <rte_log.h>
50 #include <rte_malloc.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_launch.h>
57 #include <rte_atomic.h>
58 #include <rte_cycles.h>
59 #include <rte_prefetch.h>
60 #include <rte_lcore.h>
61 #include <rte_per_lcore.h>
62 #include <rte_branch_prediction.h>
63 #include <rte_interrupts.h>
64 #include <rte_pci.h>
65 #include <rte_random.h>
66 #include <rte_debug.h>
67 #include <rte_ether.h>
68 #include <rte_ethdev.h>
69 #include <rte_mempool.h>
70 #include <rte_mbuf.h>
71 #include <rte_ip.h>
72 #include <rte_tcp.h>
73 #include <rte_udp.h>
74 #include <rte_string_fns.h>
75 #include <rte_timer.h>
76 #include <rte_power.h>
77 #include <rte_eal.h>
78 #include <rte_spinlock.h>
79
80 #define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
81
82 #define MAX_PKT_BURST 32
83
84 #define MIN_ZERO_POLL_COUNT 10
85
86 /* around 100ms at 2 GHz */
87 #define TIMER_RESOLUTION_CYCLES 200000000ULL
88 /* 100 ms interval */
89 #define TIMER_NUMBER_PER_SECOND 10
90 /* 100000 us */
91 #define SCALING_PERIOD (1000000/TIMER_NUMBER_PER_SECOND)
92 #define SCALING_DOWN_TIME_RATIO_THRESHOLD 0.25
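/*
 * Taken together: the power timer fires TIMER_NUMBER_PER_SECOND times per
 * second, and a core's frequency is stepped down whenever at least 25% of the
 * previous SCALING_PERIOD (100000 us) was spent sleeping (see power_timer_cb).
 */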
93
94 #define APP_LOOKUP_EXACT_MATCH 0
95 #define APP_LOOKUP_LPM 1
96 #define DO_RFC_1812_CHECKS
97
98 #ifndef APP_LOOKUP_METHOD
99 #define APP_LOOKUP_METHOD APP_LOOKUP_LPM
100 #endif
101
102 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
103 #include <rte_hash.h>
104 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
105 #include <rte_lpm.h>
106 #else
107 #error "APP_LOOKUP_METHOD set to incorrect value"
108 #endif
109
110 #ifndef IPv6_BYTES
111 #define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
112 "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
113 #define IPv6_BYTES(addr) \
114 addr[0], addr[1], addr[2], addr[3], \
115 addr[4], addr[5], addr[6], addr[7], \
116 addr[8], addr[9], addr[10], addr[11],\
117 addr[12], addr[13],addr[14], addr[15]
118 #endif
119
120 #define MAX_JUMBO_PKT_LEN 9600
121
122 #define IPV6_ADDR_LEN 16
123
124 #define MEMPOOL_CACHE_SIZE 256
125
126 /*
127 * This expression is used to calculate the number of mbufs needed depending on
128 * user input, taking into account memory for rx and tx hardware rings, cache
129 * per lcore and mtable per port per lcore. RTE_MAX is used to ensure that
130 * NB_MBUF never goes below a minimum value of 8192.
131 */
132
133 #define NB_MBUF RTE_MAX ( \
134 (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \
135 nb_ports*nb_lcores*MAX_PKT_BURST + \
136 nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \
137 nb_lcores*MEMPOOL_CACHE_SIZE), \
138 (unsigned)8192)
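/*
 * Illustrative sizing (hypothetical counts, not taken from any configuration):
 * with nb_ports = 2, nb_rx_queue = 1 and n_tx_queue = nb_lcores = 3, the sum is
 * 2*1*128 + 2*3*32 + 2*3*512 + 3*256 = 4288, so the 8192 floor is used instead.
 */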
139
140 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
141
142 #define NB_SOCKETS 8
143
144 /* Configure how many packets ahead to prefetch, when reading packets */
145 #define PREFETCH_OFFSET 3
146
147 /*
148 * Configurable number of RX/TX ring descriptors
149 */
150 #define RTE_TEST_RX_DESC_DEFAULT 128
151 #define RTE_TEST_TX_DESC_DEFAULT 512
152 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
153 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
154
155 /* ethernet addresses of ports */
156 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
157
158 /* per-port spinlocks, taken while toggling Rx interrupts */
159 static rte_spinlock_t locks[RTE_MAX_ETHPORTS];
160
161 /* mask of enabled ports */
162 static uint32_t enabled_port_mask = 0;
163 /* Promiscuous mode is disabled by default. */
164 static int promiscuous_on = 0;
165 /* NUMA is enabled by default. */
166 static int numa_on = 1;
167
168 enum freq_scale_hint_t
169 {
170 FREQ_LOWER = -1,
171 FREQ_CURRENT = 0,
172 FREQ_HIGHER = 1,
173 FREQ_HIGHEST = 2
174 };
175
176 struct lcore_rx_queue {
177 uint8_t port_id;
178 uint8_t queue_id;
179 enum freq_scale_hint_t freq_up_hint;
180 uint32_t zero_rx_packet_count;
181 uint32_t idle_hint;
182 } __rte_cache_aligned;
183
184 #define MAX_RX_QUEUE_PER_LCORE 16
185 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
186 #define MAX_RX_QUEUE_PER_PORT 128
187
188 #define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16
189
190
191 #define MAX_LCORE_PARAMS 1024
192 struct lcore_params {
193 uint8_t port_id;
194 uint8_t queue_id;
195 uint8_t lcore_id;
196 } __rte_cache_aligned;
197
198 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
199 static struct lcore_params lcore_params_array_default[] = {
200 {0, 0, 2},
201 {0, 1, 2},
202 {0, 2, 2},
203 {1, 0, 2},
204 {1, 1, 2},
205 {1, 2, 2},
206 {2, 0, 2},
207 {3, 0, 3},
208 {3, 1, 3},
209 };
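/*
 * Each {port, queue, lcore} triple assigns one Rx queue to the lcore that
 * polls it; the default table above makes lcore 2 poll queues 0-2 of ports 0
 * and 1 plus queue 0 of port 2, and lcore 3 poll queues 0-1 of port 3. The
 * table can be overridden at run time with the --config option.
 */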
210
211 static struct lcore_params * lcore_params = lcore_params_array_default;
212 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
213 sizeof(lcore_params_array_default[0]);
214
215 static struct rte_eth_conf port_conf = {
216 .rxmode = {
217 .mq_mode = ETH_MQ_RX_RSS,
218 .max_rx_pkt_len = ETHER_MAX_LEN,
219 .split_hdr_size = 0,
220 .header_split = 0, /**< Header Split disabled */
221 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
222 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
223 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
224 		.hw_strip_crc = 0, /**< CRC stripping by hardware disabled */
225 },
226 .rx_adv_conf = {
227 .rss_conf = {
228 .rss_key = NULL,
229 .rss_hf = ETH_RSS_UDP,
230 },
231 },
232 .txmode = {
233 .mq_mode = ETH_MQ_TX_NONE,
234 },
235 .intr_conf = {
236 .lsc = 1,
237 .rxq = 1,
238 },
239 };
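/*
 * Note: .intr_conf.rxq = 1 requests per-queue Rx interrupts; the main loop
 * relies on this to suspend idle lcores via rte_epoll_wait() instead of busy
 * polling (see sleep_until_rx_interrupt() below).
 */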
240
241 static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
242
243
244 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
245
246 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
247 #include <rte_hash_crc.h>
248 #define DEFAULT_HASH_FUNC rte_hash_crc
249 #else
250 #include <rte_jhash.h>
251 #define DEFAULT_HASH_FUNC rte_jhash
252 #endif
253
254 struct ipv4_5tuple {
255 uint32_t ip_dst;
256 uint32_t ip_src;
257 uint16_t port_dst;
258 uint16_t port_src;
259 uint8_t proto;
260 } __attribute__((__packed__));
261
262 struct ipv6_5tuple {
263 uint8_t ip_dst[IPV6_ADDR_LEN];
264 uint8_t ip_src[IPV6_ADDR_LEN];
265 uint16_t port_dst;
266 uint16_t port_src;
267 uint8_t proto;
268 } __attribute__((__packed__));
269
270 struct ipv4_l3fwd_route {
271 struct ipv4_5tuple key;
272 uint8_t if_out;
273 };
274
275 struct ipv6_l3fwd_route {
276 struct ipv6_5tuple key;
277 uint8_t if_out;
278 };
279
280 static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
281 {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
282 {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
283 {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
284 {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
285 };
286
287 static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
288 {
289 {
290 {0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
292 {0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
293 0x02, 0x1e, 0x67, 0xff, 0xfe, 0x0d, 0xb6, 0x0a},
294 1, 10, IPPROTO_UDP
295 }, 4
296 },
297 };
298
299 typedef struct rte_hash lookup_struct_t;
300 static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
301 static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
302
303 #define L3FWD_HASH_ENTRIES 1024
304
305 #define IPV4_L3FWD_NUM_ROUTES \
306 (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
307
308 #define IPV6_L3FWD_NUM_ROUTES \
309 (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
310
311 static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
312 static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
313 #endif
314
315 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
316 struct ipv4_l3fwd_route {
317 uint32_t ip;
318 uint8_t depth;
319 uint8_t if_out;
320 };
321
322 static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
323 {IPv4(1,1,1,0), 24, 0},
324 {IPv4(2,1,1,0), 24, 1},
325 {IPv4(3,1,1,0), 24, 2},
326 {IPv4(4,1,1,0), 24, 3},
327 {IPv4(5,1,1,0), 24, 4},
328 {IPv4(6,1,1,0), 24, 5},
329 {IPv4(7,1,1,0), 24, 6},
330 {IPv4(8,1,1,0), 24, 7},
331 };
332
333 #define IPV4_L3FWD_NUM_ROUTES \
334 (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
335
336 #define IPV4_L3FWD_LPM_MAX_RULES 1024
337
338 typedef struct rte_lpm lookup_struct_t;
339 static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
340 #endif
341
342 struct lcore_conf {
343 uint16_t n_rx_queue;
344 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
345 uint16_t n_tx_port;
346 uint16_t tx_port_id[RTE_MAX_ETHPORTS];
347 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
348 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
349 lookup_struct_t * ipv4_lookup_struct;
350 lookup_struct_t * ipv6_lookup_struct;
351 } __rte_cache_aligned;
352
353 struct lcore_stats {
354 	/* total sleep time in us during the current scaling period */
355 uint32_t sleep_time;
356 /* number of long sleep recently */
357 uint32_t nb_long_sleep;
358 /* freq. scaling up trend */
359 uint32_t trend;
360 /* total packet processed recently */
361 uint64_t nb_rx_processed;
362 /* total iterations looped recently */
363 uint64_t nb_iteration_looped;
364 uint32_t padding[9];
365 } __rte_cache_aligned;
366
367 static struct lcore_conf lcore_conf[RTE_MAX_LCORE] __rte_cache_aligned;
368 static struct lcore_stats stats[RTE_MAX_LCORE] __rte_cache_aligned;
369 static struct rte_timer power_timers[RTE_MAX_LCORE];
370
371 static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
372 static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
373 unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
374
375 /* exit signal handler */
376 static void
377 signal_exit_now(int sigtype)
378 {
379 unsigned lcore_id;
380 int ret;
381
382 if (sigtype == SIGINT) {
383 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
384 if (rte_lcore_is_enabled(lcore_id) == 0)
385 continue;
386
387 			/* de-initialize the power management library */
388 ret = rte_power_exit(lcore_id);
389 if (ret)
390 rte_exit(EXIT_FAILURE, "Power management "
391 "library de-initialization failed on "
392 "core%u\n", lcore_id);
393 }
394 }
395
396 rte_exit(EXIT_SUCCESS, "User forced exit\n");
397 }
398
399 /* Frequency scale-down timer callback */
400 static void
401 power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
402 __attribute__((unused)) void *arg)
403 {
404 uint64_t hz;
405 float sleep_time_ratio;
406 unsigned lcore_id = rte_lcore_id();
407
408 	/* compute the fraction of the last scaling period spent sleeping */
409 sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
410 (float)SCALING_PERIOD;
411 /**
412 	 * scale the frequency down a step if this lcore slept for a large share of the period.
413 */
414 if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) {
415 if (rte_power_freq_down)
416 rte_power_freq_down(lcore_id);
417 }
418 else if ( (unsigned)(stats[lcore_id].nb_rx_processed /
419 stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) {
420 /**
421 		 * scale down a step if the average number of packets per
422 		 * iteration is less than expected.
423 */
424 if (rte_power_freq_down)
425 rte_power_freq_down(lcore_id);
426 }
427
428 /**
429 	 * re-arm the timer using the current timer frequency so that the
430 	 * interval between callbacks stays roughly fixed.
431 */
432 hz = rte_get_timer_hz();
433 rte_timer_reset(&power_timers[lcore_id], hz/TIMER_NUMBER_PER_SECOND,
434 SINGLE, lcore_id, power_timer_cb, NULL);
435
436 stats[lcore_id].nb_rx_processed = 0;
437 stats[lcore_id].nb_iteration_looped = 0;
438
439 stats[lcore_id].sleep_time = 0;
440 }
441
442 /* Enqueue a single packet, and send burst if queue is filled */
443 static inline int
444 send_single_packet(struct rte_mbuf *m, uint8_t port)
445 {
446 uint32_t lcore_id;
447 struct lcore_conf *qconf;
448
449 lcore_id = rte_lcore_id();
450 qconf = &lcore_conf[lcore_id];
451
452 rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
453 qconf->tx_buffer[port], m);
454
455 return 0;
456 }
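/*
 * rte_eth_tx_buffer() only queues the mbuf here; buffered packets are sent
 * either when the buffer reaches MAX_PKT_BURST entries or when the main loop
 * flushes it after the BURST_TX_DRAIN_US timeout.
 */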
457
458 #ifdef DO_RFC_1812_CHECKS
459 static inline int
460 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
461 {
462 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
463 /*
464 * 1. The packet length reported by the Link Layer must be large
465 * enough to hold the minimum length legal IP datagram (20 bytes).
466 */
467 if (link_len < sizeof(struct ipv4_hdr))
468 return -1;
469
470 /* 2. The IP checksum must be correct. */
471 /* this is checked in H/W */
472
473 /*
474 * 3. The IP version number must be 4. If the version number is not 4
475 * then the packet may be another version of IP, such as IPng or
476 * ST-II.
477 */
478 if (((pkt->version_ihl) >> 4) != 4)
479 return -3;
480 /*
481 * 4. The IP header length field must be large enough to hold the
482 * minimum length legal IP datagram (20 bytes = 5 words).
483 */
484 if ((pkt->version_ihl & 0xf) < 5)
485 return -4;
486
487 /*
488 * 5. The IP total length field must be large enough to hold the IP
489 * datagram header, whose length is specified in the IP header length
490 * field.
491 */
492 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
493 return -5;
494
495 return 0;
496 }
497 #endif
498
499 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
500 static void
501 print_ipv4_key(struct ipv4_5tuple key)
502 {
503 printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, "
504 "proto = %d\n", (unsigned)key.ip_dst, (unsigned)key.ip_src,
505 key.port_dst, key.port_src, key.proto);
506 }
507 static void
508 print_ipv6_key(struct ipv6_5tuple key)
509 {
510 printf( "IP dst = " IPv6_BYTES_FMT ", IP src = " IPv6_BYTES_FMT ", "
511 "port dst = %d, port src = %d, proto = %d\n",
512 IPv6_BYTES(key.ip_dst), IPv6_BYTES(key.ip_src),
513 key.port_dst, key.port_src, key.proto);
514 }
515
516 static inline uint8_t
517 get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
518 lookup_struct_t * ipv4_l3fwd_lookup_struct)
519 {
520 struct ipv4_5tuple key;
521 struct tcp_hdr *tcp;
522 struct udp_hdr *udp;
523 int ret = 0;
524
525 key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
526 key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
527 key.proto = ipv4_hdr->next_proto_id;
528
529 switch (ipv4_hdr->next_proto_id) {
530 case IPPROTO_TCP:
531 tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
532 sizeof(struct ipv4_hdr));
533 key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
534 key.port_src = rte_be_to_cpu_16(tcp->src_port);
535 break;
536
537 case IPPROTO_UDP:
538 udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
539 sizeof(struct ipv4_hdr));
540 key.port_dst = rte_be_to_cpu_16(udp->dst_port);
541 key.port_src = rte_be_to_cpu_16(udp->src_port);
542 break;
543
544 default:
545 key.port_dst = 0;
546 key.port_src = 0;
547 break;
548 }
549
550 /* Find destination port */
551 ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
552 return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
553 }
554
555 static inline uint8_t
556 get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
557 lookup_struct_t *ipv6_l3fwd_lookup_struct)
558 {
559 struct ipv6_5tuple key;
560 struct tcp_hdr *tcp;
561 struct udp_hdr *udp;
562 int ret = 0;
563
564 memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
565 memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);
566
567 key.proto = ipv6_hdr->proto;
568
569 switch (ipv6_hdr->proto) {
570 case IPPROTO_TCP:
571 tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
572 sizeof(struct ipv6_hdr));
573 key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
574 key.port_src = rte_be_to_cpu_16(tcp->src_port);
575 break;
576
577 case IPPROTO_UDP:
578 udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
579 sizeof(struct ipv6_hdr));
580 key.port_dst = rte_be_to_cpu_16(udp->dst_port);
581 key.port_src = rte_be_to_cpu_16(udp->src_port);
582 break;
583
584 default:
585 key.port_dst = 0;
586 key.port_src = 0;
587 break;
588 }
589
590 /* Find destination port */
591 ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
592 return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
593 }
594 #endif
595
596 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
597 static inline uint8_t
598 get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
599 lookup_struct_t *ipv4_l3fwd_lookup_struct)
600 {
601 uint32_t next_hop;
602
603 return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
604 rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
605 next_hop : portid);
606 }
607 #endif
608
609 static inline void
610 l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
611 struct lcore_conf *qconf)
612 {
613 struct ether_hdr *eth_hdr;
614 struct ipv4_hdr *ipv4_hdr;
615 void *d_addr_bytes;
616 uint8_t dst_port;
617
618 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
619
620 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
621 /* Handle IPv4 headers.*/
622 ipv4_hdr =
623 rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
624 sizeof(struct ether_hdr));
625
626 #ifdef DO_RFC_1812_CHECKS
627 /* Check to make sure the packet is valid (RFC1812) */
628 if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
629 rte_pktmbuf_free(m);
630 return;
631 }
632 #endif
633
634 dst_port = get_ipv4_dst_port(ipv4_hdr, portid,
635 qconf->ipv4_lookup_struct);
636 if (dst_port >= RTE_MAX_ETHPORTS ||
637 (enabled_port_mask & 1 << dst_port) == 0)
638 dst_port = portid;
639
640 /* 02:00:00:00:00:xx */
641 d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
642 *((uint64_t *)d_addr_bytes) =
643 0x000000000002 + ((uint64_t)dst_port << 40);
644
645 #ifdef DO_RFC_1812_CHECKS
646 /* Update time to live and header checksum */
647 --(ipv4_hdr->time_to_live);
648 ++(ipv4_hdr->hdr_checksum);
649 #endif
650
651 /* src addr */
652 ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
653
654 send_single_packet(m, dst_port);
655 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
656 /* Handle IPv6 headers.*/
657 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
658 struct ipv6_hdr *ipv6_hdr;
659
660 ipv6_hdr =
661 rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
662 sizeof(struct ether_hdr));
663
664 dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
665 qconf->ipv6_lookup_struct);
666
667 if (dst_port >= RTE_MAX_ETHPORTS ||
668 (enabled_port_mask & 1 << dst_port) == 0)
669 dst_port = portid;
670
671 /* 02:00:00:00:00:xx */
672 d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
673 *((uint64_t *)d_addr_bytes) =
674 0x000000000002 + ((uint64_t)dst_port << 40);
675
676 /* src addr */
677 ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
678
679 send_single_packet(m, dst_port);
680 #else
681 /* We don't currently handle IPv6 packets in LPM mode. */
682 rte_pktmbuf_free(m);
683 #endif
684 } else
685 rte_pktmbuf_free(m);
686
687 }
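/*
 * Note on the destination MAC rewrite above: on a little-endian host the
 * 64-bit store puts 02:00:00:00:00:<dst_port> into d_addr and spills two zero
 * bytes into s_addr; this is only harmless because ether_addr_copy()
 * overwrites s_addr immediately afterwards.
 */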
688
689 #define MINIMUM_SLEEP_TIME 1
690 #define SUSPEND_THRESHOLD 300
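/*
 * Both values are sleep hints in microseconds. SUSPEND_THRESHOLD also serves
 * as the zero-poll count above which the longer sleep is requested, and, in
 * the main loop, as the cut-off between a short rte_delay_us() busy wait and
 * suspending until an Rx interrupt arrives.
 */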
691
692 static inline uint32_t
693 power_idle_heuristic(uint32_t zero_rx_packet_count)
694 {
695 	/* If the zero-Rx poll count is still below SUSPEND_THRESHOLD, sleep 1 us */
696 	if (zero_rx_packet_count < SUSPEND_THRESHOLD)
697 		return MINIMUM_SLEEP_TIME;
698 	/* Otherwise request a longer sleep of SUSPEND_THRESHOLD us, long enough
699 	   for the CPU to reach deeper C3/C6 states
700 	*/
701 	else
702 		return SUSPEND_THRESHOLD;
703
704 return 0;
705 }
706
707 static inline enum freq_scale_hint_t
708 power_freq_scaleup_heuristic(unsigned lcore_id,
709 uint8_t port_id,
710 uint16_t queue_id)
711 {
712 /**
713 	 * The HW Rx queue holds 128 descriptors by default, and an Rx burst
714 	 * reads at most 32 entries per iteration
715 */
716 #define FREQ_GEAR1_RX_PACKET_THRESHOLD MAX_PKT_BURST
717 #define FREQ_GEAR2_RX_PACKET_THRESHOLD (MAX_PKT_BURST*2)
718 #define FREQ_GEAR3_RX_PACKET_THRESHOLD (MAX_PKT_BURST*3)
719 #define FREQ_UP_TREND1_ACC 1
720 #define FREQ_UP_TREND2_ACC 100
721 #define FREQ_UP_THRESHOLD 10000
722
723 if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
724 FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)) {
725 stats[lcore_id].trend = 0;
726 return FREQ_HIGHEST;
727 } else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
728 FREQ_GEAR2_RX_PACKET_THRESHOLD) > 0))
729 stats[lcore_id].trend += FREQ_UP_TREND2_ACC;
730 else if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
731 FREQ_GEAR1_RX_PACKET_THRESHOLD) > 0))
732 stats[lcore_id].trend += FREQ_UP_TREND1_ACC;
733
734 if (likely(stats[lcore_id].trend > FREQ_UP_THRESHOLD)) {
735 stats[lcore_id].trend = 0;
736 return FREQ_HIGHER;
737 }
738
739 return FREQ_CURRENT;
740 }
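/*
 * The heuristic above samples Rx ring occupancy with
 * rte_eth_rx_descriptor_done(): a backlog of three bursts jumps straight to
 * the highest frequency, while smaller backlogs accumulate a trend counter
 * that requests a single step up once FREQ_UP_THRESHOLD is reached.
 */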
741
742 /**
743  * Force the polling thread to sleep until a one-shot Rx interrupt triggers.
744  *
745  * @param num
746  *   Number of Rx interrupt events (one per configured Rx queue) to wait on.
747  *
748 * @return
749 * 0 on success
750 */
751 static int
752 sleep_until_rx_interrupt(int num)
753 {
754 struct rte_epoll_event event[num];
755 int n, i;
756 uint8_t port_id, queue_id;
757 void *data;
758
759 RTE_LOG(INFO, L3FWD_POWER,
760 "lcore %u sleeps until interrupt triggers\n",
761 rte_lcore_id());
762
763 n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, -1);
764 for (i = 0; i < n; i++) {
765 data = event[i].epdata.data;
766 port_id = ((uintptr_t)data) >> CHAR_BIT;
767 queue_id = ((uintptr_t)data) &
768 RTE_LEN2MASK(CHAR_BIT, uint8_t);
769 rte_eth_dev_rx_intr_disable(port_id, queue_id);
770 RTE_LOG(INFO, L3FWD_POWER,
771 			"lcore %u is woken up by rx interrupt on"
772 " port %d queue %d\n",
773 rte_lcore_id(), port_id, queue_id);
774 }
775
776 return 0;
777 }
778
779 static void turn_on_intr(struct lcore_conf *qconf)
780 {
781 int i;
782 struct lcore_rx_queue *rx_queue;
783 uint8_t port_id, queue_id;
784
785 for (i = 0; i < qconf->n_rx_queue; ++i) {
786 rx_queue = &(qconf->rx_queue_list[i]);
787 port_id = rx_queue->port_id;
788 queue_id = rx_queue->queue_id;
789
790 rte_spinlock_lock(&(locks[port_id]));
791 rte_eth_dev_rx_intr_enable(port_id, queue_id);
792 rte_spinlock_unlock(&(locks[port_id]));
793 }
794 }
795
796 static int event_register(struct lcore_conf *qconf)
797 {
798 struct lcore_rx_queue *rx_queue;
799 uint8_t portid, queueid;
800 uint32_t data;
801 int ret;
802 int i;
803
804 for (i = 0; i < qconf->n_rx_queue; ++i) {
805 rx_queue = &(qconf->rx_queue_list[i]);
806 portid = rx_queue->port_id;
807 queueid = rx_queue->queue_id;
808 data = portid << CHAR_BIT | queueid;
809
810 ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
811 RTE_EPOLL_PER_THREAD,
812 RTE_INTR_EVENT_ADD,
813 (void *)((uintptr_t)data));
814 if (ret)
815 return ret;
816 }
817
818 return 0;
819 }
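/*
 * event_register() packs the port id into the upper bits and the queue id
 * into the low byte of the epoll user data (portid << CHAR_BIT | queueid);
 * sleep_until_rx_interrupt() above reverses this encoding to find out which
 * queue woke the lcore.
 */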
820
821 /* main processing loop */
822 static int
823 main_loop(__attribute__((unused)) void *dummy)
824 {
825 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
826 unsigned lcore_id;
827 uint64_t prev_tsc, diff_tsc, cur_tsc;
828 uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
829 int i, j, nb_rx;
830 uint8_t portid, queueid;
831 struct lcore_conf *qconf;
832 struct lcore_rx_queue *rx_queue;
833 enum freq_scale_hint_t lcore_scaleup_hint;
834 uint32_t lcore_rx_idle_count = 0;
835 uint32_t lcore_idle_hint = 0;
836 int intr_en = 0;
837
838 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
839
840 prev_tsc = 0;
841
842 lcore_id = rte_lcore_id();
843 qconf = &lcore_conf[lcore_id];
844
845 if (qconf->n_rx_queue == 0) {
846 RTE_LOG(INFO, L3FWD_POWER, "lcore %u has nothing to do\n", lcore_id);
847 return 0;
848 }
849
850 RTE_LOG(INFO, L3FWD_POWER, "entering main loop on lcore %u\n", lcore_id);
851
852 for (i = 0; i < qconf->n_rx_queue; i++) {
853 portid = qconf->rx_queue_list[i].port_id;
854 queueid = qconf->rx_queue_list[i].queue_id;
855 RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
856 "rxqueueid=%hhu\n", lcore_id, portid, queueid);
857 }
858
859 /* add into event wait list */
860 if (event_register(qconf) == 0)
861 intr_en = 1;
862 else
863 		RTE_LOG(INFO, L3FWD_POWER, "RX interrupt won't be enabled.\n");
864
865 while (1) {
866 stats[lcore_id].nb_iteration_looped++;
867
868 cur_tsc = rte_rdtsc();
869 cur_tsc_power = cur_tsc;
870
871 /*
872 * TX burst queue drain
873 */
874 diff_tsc = cur_tsc - prev_tsc;
875 if (unlikely(diff_tsc > drain_tsc)) {
876 for (i = 0; i < qconf->n_tx_port; ++i) {
877 portid = qconf->tx_port_id[i];
878 rte_eth_tx_buffer_flush(portid,
879 qconf->tx_queue_id[portid],
880 qconf->tx_buffer[portid]);
881 }
882 prev_tsc = cur_tsc;
883 }
884
885 diff_tsc_power = cur_tsc_power - prev_tsc_power;
886 if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
887 rte_timer_manage();
888 prev_tsc_power = cur_tsc_power;
889 }
890
891 start_rx:
892 /*
893 * Read packet from RX queues
894 */
895 lcore_scaleup_hint = FREQ_CURRENT;
896 lcore_rx_idle_count = 0;
897 for (i = 0; i < qconf->n_rx_queue; ++i) {
898 rx_queue = &(qconf->rx_queue_list[i]);
899 rx_queue->idle_hint = 0;
900 portid = rx_queue->port_id;
901 queueid = rx_queue->queue_id;
902
903 nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
904 MAX_PKT_BURST);
905
906 stats[lcore_id].nb_rx_processed += nb_rx;
907 if (unlikely(nb_rx == 0)) {
908 /**
909 				 * no packet received from this rx queue; try
910 				 * to sleep for a while, forcing the CPU to
911 				 * enter deeper C-states.
912 */
913 rx_queue->zero_rx_packet_count++;
914
915 if (rx_queue->zero_rx_packet_count <=
916 MIN_ZERO_POLL_COUNT)
917 continue;
918
919 rx_queue->idle_hint = power_idle_heuristic(\
920 rx_queue->zero_rx_packet_count);
921 lcore_rx_idle_count++;
922 } else {
923 rx_queue->zero_rx_packet_count = 0;
924
925 /**
926 				 * do not scale the frequency up immediately,
927 				 * as user-to-kernel-space communication is
928 				 * costly and might hurt packet I/O for the
929 				 * packets just received.
930 */
931 rx_queue->freq_up_hint =
932 power_freq_scaleup_heuristic(lcore_id,
933 portid, queueid);
934 }
935
936 /* Prefetch first packets */
937 for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
938 rte_prefetch0(rte_pktmbuf_mtod(
939 pkts_burst[j], void *));
940 }
941
942 /* Prefetch and forward already prefetched packets */
943 for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
944 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
945 j + PREFETCH_OFFSET], void *));
946 l3fwd_simple_forward(pkts_burst[j], portid,
947 qconf);
948 }
949
950 /* Forward remaining prefetched packets */
951 for (; j < nb_rx; j++) {
952 l3fwd_simple_forward(pkts_burst[j], portid,
953 qconf);
954 }
955 }
956
957 if (likely(lcore_rx_idle_count != qconf->n_rx_queue)) {
958 for (i = 1, lcore_scaleup_hint =
959 qconf->rx_queue_list[0].freq_up_hint;
960 i < qconf->n_rx_queue; ++i) {
961 rx_queue = &(qconf->rx_queue_list[i]);
962 if (rx_queue->freq_up_hint >
963 lcore_scaleup_hint)
964 lcore_scaleup_hint =
965 rx_queue->freq_up_hint;
966 }
967
968 if (lcore_scaleup_hint == FREQ_HIGHEST) {
969 if (rte_power_freq_max)
970 rte_power_freq_max(lcore_id);
971 } else if (lcore_scaleup_hint == FREQ_HIGHER) {
972 if (rte_power_freq_up)
973 rte_power_freq_up(lcore_id);
974 }
975 } else {
976 /**
977 			 * All Rx queues were empty in recent consecutive
978 			 * polls; sleep conservatively, i.e. as little as
979 			 * possible.
980 */
981 for (i = 1, lcore_idle_hint =
982 qconf->rx_queue_list[0].idle_hint;
983 i < qconf->n_rx_queue; ++i) {
984 rx_queue = &(qconf->rx_queue_list[i]);
985 if (rx_queue->idle_hint < lcore_idle_hint)
986 lcore_idle_hint = rx_queue->idle_hint;
987 }
988
989 if (lcore_idle_hint < SUSPEND_THRESHOLD)
990 /**
991 				 * for a short sleep, spin on the "pause"
992 				 * instruction to avoid a context switch, which
993 				 * generally takes hundreds of microseconds.
994 */
995 rte_delay_us(lcore_idle_hint);
996 else {
997 				/* suspend until rx interrupt triggers */
998 if (intr_en) {
999 turn_on_intr(qconf);
1000 sleep_until_rx_interrupt(
1001 qconf->n_rx_queue);
1002 }
1003 /* start receiving packets immediately */
1004 goto start_rx;
1005 }
1006 stats[lcore_id].sleep_time += lcore_idle_hint;
1007 }
1008 }
1009 }
1010
1011 static int
1012 check_lcore_params(void)
1013 {
1014 uint8_t queue, lcore;
1015 uint16_t i;
1016 int socketid;
1017
1018 for (i = 0; i < nb_lcore_params; ++i) {
1019 queue = lcore_params[i].queue_id;
1020 if (queue >= MAX_RX_QUEUE_PER_PORT) {
1021 printf("invalid queue number: %hhu\n", queue);
1022 return -1;
1023 }
1024 lcore = lcore_params[i].lcore_id;
1025 if (!rte_lcore_is_enabled(lcore)) {
1026 printf("error: lcore %hhu is not enabled in lcore "
1027 "mask\n", lcore);
1028 return -1;
1029 }
1030 		if ((socketid = rte_lcore_to_socket_id(lcore)) != 0 &&
1031 			(numa_on == 0)) {
1032 printf("warning: lcore %hhu is on socket %d with numa "
1033 "off\n", lcore, socketid);
1034 }
1035 }
1036 return 0;
1037 }
1038
1039 static int
1040 check_port_config(const unsigned nb_ports)
1041 {
1042 unsigned portid;
1043 uint16_t i;
1044
1045 for (i = 0; i < nb_lcore_params; ++i) {
1046 portid = lcore_params[i].port_id;
1047 if ((enabled_port_mask & (1 << portid)) == 0) {
1048 printf("port %u is not enabled in port mask\n",
1049 portid);
1050 return -1;
1051 }
1052 if (portid >= nb_ports) {
1053 printf("port %u is not present on the board\n",
1054 portid);
1055 return -1;
1056 }
1057 }
1058 return 0;
1059 }
1060
1061 static uint8_t
1062 get_port_n_rx_queues(const uint8_t port)
1063 {
1064 int queue = -1;
1065 uint16_t i;
1066
1067 for (i = 0; i < nb_lcore_params; ++i) {
1068 if (lcore_params[i].port_id == port &&
1069 lcore_params[i].queue_id > queue)
1070 queue = lcore_params[i].queue_id;
1071 }
1072 return (uint8_t)(++queue);
1073 }
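/*
 * Returns the highest queue id configured for the port plus one, i.e. the
 * number of Rx queues rte_eth_dev_configure() must set up for it.
 */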
1074
1075 static int
1076 init_lcore_rx_queues(void)
1077 {
1078 uint16_t i, nb_rx_queue;
1079 uint8_t lcore;
1080
1081 for (i = 0; i < nb_lcore_params; ++i) {
1082 lcore = lcore_params[i].lcore_id;
1083 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
1084 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1085 printf("error: too many queues (%u) for lcore: %u\n",
1086 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
1087 return -1;
1088 } else {
1089 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1090 lcore_params[i].port_id;
1091 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1092 lcore_params[i].queue_id;
1093 lcore_conf[lcore].n_rx_queue++;
1094 }
1095 }
1096 return 0;
1097 }
1098
1099 /* display usage */
1100 static void
1101 print_usage(const char *prgname)
1102 {
1103 printf ("%s [EAL options] -- -p PORTMASK -P"
1104 		" [--config (port,queue,lcore)[,(port,queue,lcore)]]"
1105 " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
1106 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
1107 " -P : enable promiscuous mode\n"
1108 " --config (port,queue,lcore): rx queues configuration\n"
1109 " --no-numa: optional, disable numa awareness\n"
1110 		"  --enable-jumbo: enable jumbo frames;"
1111 		" --max-pkt-len PKTLEN sets the max packet length in decimal (64-9600)\n",
1112 prgname);
1113 }
1114
1115 static int parse_max_pkt_len(const char *pktlen)
1116 {
1117 char *end = NULL;
1118 unsigned long len;
1119
1120 /* parse decimal string */
1121 len = strtoul(pktlen, &end, 10);
1122 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
1123 return -1;
1124
1125 if (len == 0)
1126 return -1;
1127
1128 return len;
1129 }
1130
1131 static int
1132 parse_portmask(const char *portmask)
1133 {
1134 char *end = NULL;
1135 unsigned long pm;
1136
1137 /* parse hexadecimal string */
1138 pm = strtoul(portmask, &end, 16);
1139 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1140 return -1;
1141
1142 if (pm == 0)
1143 return -1;
1144
1145 return pm;
1146 }
1147
1148 static int
1149 parse_config(const char *q_arg)
1150 {
1151 char s[256];
1152 const char *p, *p0 = q_arg;
1153 char *end;
1154 enum fieldnames {
1155 FLD_PORT = 0,
1156 FLD_QUEUE,
1157 FLD_LCORE,
1158 _NUM_FLD
1159 };
1160 unsigned long int_fld[_NUM_FLD];
1161 char *str_fld[_NUM_FLD];
1162 int i;
1163 unsigned size;
1164
1165 nb_lcore_params = 0;
1166
1167 while ((p = strchr(p0,'(')) != NULL) {
1168 ++p;
1169 if((p0 = strchr(p,')')) == NULL)
1170 return -1;
1171
1172 size = p0 - p;
1173 if(size >= sizeof(s))
1174 return -1;
1175
1176 snprintf(s, sizeof(s), "%.*s", size, p);
1177 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1178 _NUM_FLD)
1179 return -1;
1180 for (i = 0; i < _NUM_FLD; i++){
1181 errno = 0;
1182 int_fld[i] = strtoul(str_fld[i], &end, 0);
1183 if (errno != 0 || end == str_fld[i] || int_fld[i] >
1184 255)
1185 return -1;
1186 }
1187 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1188 printf("exceeded max number of lcore params: %hu\n",
1189 nb_lcore_params);
1190 return -1;
1191 }
1192 lcore_params_array[nb_lcore_params].port_id =
1193 (uint8_t)int_fld[FLD_PORT];
1194 lcore_params_array[nb_lcore_params].queue_id =
1195 (uint8_t)int_fld[FLD_QUEUE];
1196 lcore_params_array[nb_lcore_params].lcore_id =
1197 (uint8_t)int_fld[FLD_LCORE];
1198 ++nb_lcore_params;
1199 }
1200 lcore_params = lcore_params_array;
1201
1202 return 0;
1203 }
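/*
 * Illustrative --config value (hypothetical): --config="(0,0,2),(0,1,2),(1,0,3)"
 * maps port 0 queues 0 and 1 to lcore 2 and port 1 queue 0 to lcore 3,
 * replacing the default lcore_params_array_default table above.
 */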
1204
1205 /* Parse the argument given in the command line of the application */
1206 static int
1207 parse_args(int argc, char **argv)
1208 {
1209 int opt, ret;
1210 char **argvopt;
1211 int option_index;
1212 char *prgname = argv[0];
1213 static struct option lgopts[] = {
1214 {"config", 1, 0, 0},
1215 {"no-numa", 0, 0, 0},
1216 {"enable-jumbo", 0, 0, 0},
1217 {NULL, 0, 0, 0}
1218 };
1219
1220 argvopt = argv;
1221
1222 while ((opt = getopt_long(argc, argvopt, "p:P",
1223 lgopts, &option_index)) != EOF) {
1224
1225 switch (opt) {
1226 /* portmask */
1227 case 'p':
1228 enabled_port_mask = parse_portmask(optarg);
1229 if (enabled_port_mask == 0) {
1230 printf("invalid portmask\n");
1231 print_usage(prgname);
1232 return -1;
1233 }
1234 break;
1235 case 'P':
1236 printf("Promiscuous mode selected\n");
1237 promiscuous_on = 1;
1238 break;
1239
1240 /* long options */
1241 case 0:
1242 if (!strncmp(lgopts[option_index].name, "config", 6)) {
1243 ret = parse_config(optarg);
1244 if (ret) {
1245 printf("invalid config\n");
1246 print_usage(prgname);
1247 return -1;
1248 }
1249 }
1250
1251 if (!strncmp(lgopts[option_index].name,
1252 "no-numa", 7)) {
1253 printf("numa is disabled \n");
1254 numa_on = 0;
1255 }
1256
1257 if (!strncmp(lgopts[option_index].name,
1258 "enable-jumbo", 12)) {
1259 struct option lenopts =
1260 {"max-pkt-len", required_argument, \
1261 0, 0};
1262
1263 printf("jumbo frame is enabled \n");
1264 port_conf.rxmode.jumbo_frame = 1;
1265
1266 /**
1267 * if no max-pkt-len set, use the default value
1268 * ETHER_MAX_LEN
1269 */
1270 if (0 == getopt_long(argc, argvopt, "",
1271 &lenopts, &option_index)) {
1272 ret = parse_max_pkt_len(optarg);
1273 if ((ret < 64) ||
1274 (ret > MAX_JUMBO_PKT_LEN)){
1275 printf("invalid packet "
1276 "length\n");
1277 print_usage(prgname);
1278 return -1;
1279 }
1280 port_conf.rxmode.max_rx_pkt_len = ret;
1281 }
1282 printf("set jumbo frame "
1283 "max packet length to %u\n",
1284 (unsigned int)port_conf.rxmode.max_rx_pkt_len);
1285 }
1286
1287 break;
1288
1289 default:
1290 print_usage(prgname);
1291 return -1;
1292 }
1293 }
1294
1295 if (optind >= 0)
1296 argv[optind-1] = prgname;
1297
1298 ret = optind-1;
1299 optind = 0; /* reset getopt lib */
1300 return ret;
1301 }
1302
1303 static void
1304 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1305 {
1306 char buf[ETHER_ADDR_FMT_SIZE];
1307 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1308 printf("%s%s", name, buf);
1309 }
1310
1311 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1312 static void
1313 setup_hash(int socketid)
1314 {
1315 struct rte_hash_parameters ipv4_l3fwd_hash_params = {
1316 .name = NULL,
1317 .entries = L3FWD_HASH_ENTRIES,
1318 .key_len = sizeof(struct ipv4_5tuple),
1319 .hash_func = DEFAULT_HASH_FUNC,
1320 .hash_func_init_val = 0,
1321 };
1322
1323 struct rte_hash_parameters ipv6_l3fwd_hash_params = {
1324 .name = NULL,
1325 .entries = L3FWD_HASH_ENTRIES,
1326 .key_len = sizeof(struct ipv6_5tuple),
1327 .hash_func = DEFAULT_HASH_FUNC,
1328 .hash_func_init_val = 0,
1329 };
1330
1331 unsigned i;
1332 int ret;
1333 char s[64];
1334
1335 /* create ipv4 hash */
1336 snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
1337 ipv4_l3fwd_hash_params.name = s;
1338 ipv4_l3fwd_hash_params.socket_id = socketid;
1339 ipv4_l3fwd_lookup_struct[socketid] =
1340 rte_hash_create(&ipv4_l3fwd_hash_params);
1341 if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
1342 rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
1343 "socket %d\n", socketid);
1344
1345 /* create ipv6 hash */
1346 snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
1347 ipv6_l3fwd_hash_params.name = s;
1348 ipv6_l3fwd_hash_params.socket_id = socketid;
1349 ipv6_l3fwd_lookup_struct[socketid] =
1350 rte_hash_create(&ipv6_l3fwd_hash_params);
1351 if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
1352 rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
1353 "socket %d\n", socketid);
1354
1355
1356 /* populate the ipv4 hash */
1357 for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
1358 ret = rte_hash_add_key (ipv4_l3fwd_lookup_struct[socketid],
1359 (void *) &ipv4_l3fwd_route_array[i].key);
1360 if (ret < 0) {
1361 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the"
1362 "l3fwd hash on socket %d\n", i, socketid);
1363 }
1364 ipv4_l3fwd_out_if[ret] = ipv4_l3fwd_route_array[i].if_out;
1365 printf("Hash: Adding key\n");
1366 print_ipv4_key(ipv4_l3fwd_route_array[i].key);
1367 }
1368
1369 /* populate the ipv6 hash */
1370 for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
1371 ret = rte_hash_add_key (ipv6_l3fwd_lookup_struct[socketid],
1372 (void *) &ipv6_l3fwd_route_array[i].key);
1373 if (ret < 0) {
1374 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the"
1375 "l3fwd hash on socket %d\n", i, socketid);
1376 }
1377 ipv6_l3fwd_out_if[ret] = ipv6_l3fwd_route_array[i].if_out;
1378 printf("Hash: Adding key\n");
1379 print_ipv6_key(ipv6_l3fwd_route_array[i].key);
1380 }
1381 }
1382 #endif
1383
1384 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1385 static void
1386 setup_lpm(int socketid)
1387 {
1388 unsigned i;
1389 int ret;
1390 char s[64];
1391
1392 /* create the LPM table */
1393 struct rte_lpm_config lpm_ipv4_config;
1394
1395 lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
1396 lpm_ipv4_config.number_tbl8s = 256;
1397 lpm_ipv4_config.flags = 0;
1398
1399 snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
1400 ipv4_l3fwd_lookup_struct[socketid] =
1401 rte_lpm_create(s, socketid, &lpm_ipv4_config);
1402 if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
1403 rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
1404 " on socket %d\n", socketid);
1405
1406 /* populate the LPM table */
1407 for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
1408 ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
1409 ipv4_l3fwd_route_array[i].ip,
1410 ipv4_l3fwd_route_array[i].depth,
1411 ipv4_l3fwd_route_array[i].if_out);
1412
1413 if (ret < 0) {
1414 rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
1415 "l3fwd LPM table on socket %d\n",
1416 i, socketid);
1417 }
1418
1419 printf("LPM: Adding route 0x%08x / %d (%d)\n",
1420 (unsigned)ipv4_l3fwd_route_array[i].ip,
1421 ipv4_l3fwd_route_array[i].depth,
1422 ipv4_l3fwd_route_array[i].if_out);
1423 }
1424 }
1425 #endif
1426
1427 static int
1428 init_mem(unsigned nb_mbuf)
1429 {
1430 struct lcore_conf *qconf;
1431 int socketid;
1432 unsigned lcore_id;
1433 char s[64];
1434
1435 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1436 if (rte_lcore_is_enabled(lcore_id) == 0)
1437 continue;
1438
1439 if (numa_on)
1440 socketid = rte_lcore_to_socket_id(lcore_id);
1441 else
1442 socketid = 0;
1443
1444 if (socketid >= NB_SOCKETS) {
1445 rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is "
1446 "out of range %d\n", socketid,
1447 lcore_id, NB_SOCKETS);
1448 }
1449 if (pktmbuf_pool[socketid] == NULL) {
1450 snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
1451 pktmbuf_pool[socketid] =
1452 rte_pktmbuf_pool_create(s, nb_mbuf,
1453 MEMPOOL_CACHE_SIZE, 0,
1454 RTE_MBUF_DEFAULT_BUF_SIZE,
1455 socketid);
1456 if (pktmbuf_pool[socketid] == NULL)
1457 rte_exit(EXIT_FAILURE,
1458 "Cannot init mbuf pool on socket %d\n",
1459 socketid);
1460 else
1461 printf("Allocated mbuf pool on socket %d\n",
1462 socketid);
1463
1464 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
1465 setup_lpm(socketid);
1466 #else
1467 setup_hash(socketid);
1468 #endif
1469 }
1470 qconf = &lcore_conf[lcore_id];
1471 qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
1472 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
1473 qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
1474 #endif
1475 }
1476 return 0;
1477 }
1478
1479 /* Check the link status of all ports in up to 9 s, and print the final status */
1480 static void
1481 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1482 {
1483 #define CHECK_INTERVAL 100 /* 100ms */
1484 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1485 uint8_t portid, count, all_ports_up, print_flag = 0;
1486 struct rte_eth_link link;
1487
1488 printf("\nChecking link status");
1489 fflush(stdout);
1490 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1491 all_ports_up = 1;
1492 for (portid = 0; portid < port_num; portid++) {
1493 if ((port_mask & (1 << portid)) == 0)
1494 continue;
1495 memset(&link, 0, sizeof(link));
1496 rte_eth_link_get_nowait(portid, &link);
1497 /* print link status if flag set */
1498 if (print_flag == 1) {
1499 if (link.link_status)
1500 printf("Port %d Link Up - speed %u "
1501 "Mbps - %s\n", (uint8_t)portid,
1502 (unsigned)link.link_speed,
1503 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1504 				("full-duplex") : ("half-duplex"));
1505 else
1506 printf("Port %d Link Down\n",
1507 (uint8_t)portid);
1508 continue;
1509 }
1510 /* clear all_ports_up flag if any link down */
1511 if (link.link_status == ETH_LINK_DOWN) {
1512 all_ports_up = 0;
1513 break;
1514 }
1515 }
1516 /* after finally printing all link status, get out */
1517 if (print_flag == 1)
1518 break;
1519
1520 if (all_ports_up == 0) {
1521 printf(".");
1522 fflush(stdout);
1523 rte_delay_ms(CHECK_INTERVAL);
1524 }
1525
1526 /* set the print_flag if all ports up or timeout */
1527 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1528 print_flag = 1;
1529 printf("done\n");
1530 }
1531 }
1532 }
1533
1534 int
1535 main(int argc, char **argv)
1536 {
1537 struct lcore_conf *qconf;
1538 struct rte_eth_dev_info dev_info;
1539 struct rte_eth_txconf *txconf;
1540 int ret;
1541 unsigned nb_ports;
1542 uint16_t queueid;
1543 unsigned lcore_id;
1544 uint64_t hz;
1545 uint32_t n_tx_queue, nb_lcores;
1546 uint32_t dev_rxq_num, dev_txq_num;
1547 uint8_t portid, nb_rx_queue, queue, socketid;
1548
1549 	/* catch SIGINT so the power library is de-initialized and cpufreq settings are restored */
1550 signal(SIGINT, signal_exit_now);
1551
1552 /* init EAL */
1553 ret = rte_eal_init(argc, argv);
1554 if (ret < 0)
1555 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1556 argc -= ret;
1557 argv += ret;
1558
1559 	/* init RTE timer library, to be used later */
1560 rte_timer_subsystem_init();
1561
1562 /* parse application arguments (after the EAL ones) */
1563 ret = parse_args(argc, argv);
1564 if (ret < 0)
1565 rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1566
1567 if (check_lcore_params() < 0)
1568 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1569
1570 ret = init_lcore_rx_queues();
1571 if (ret < 0)
1572 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1573
1574 nb_ports = rte_eth_dev_count();
1575
1576 if (check_port_config(nb_ports) < 0)
1577 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1578
1579 nb_lcores = rte_lcore_count();
1580
1581 /* initialize all ports */
1582 for (portid = 0; portid < nb_ports; portid++) {
1583 /* skip ports that are not enabled */
1584 if ((enabled_port_mask & (1 << portid)) == 0) {
1585 printf("\nSkipping disabled port %d\n", portid);
1586 continue;
1587 }
1588
1589 /* init port */
1590 printf("Initializing port %d ... ", portid );
1591 fflush(stdout);
1592
1593 rte_eth_dev_info_get(portid, &dev_info);
1594 dev_rxq_num = dev_info.max_rx_queues;
1595 dev_txq_num = dev_info.max_tx_queues;
1596
1597 nb_rx_queue = get_port_n_rx_queues(portid);
1598 if (nb_rx_queue > dev_rxq_num)
1599 rte_exit(EXIT_FAILURE,
1600 				"Cannot configure non-existent rxq: "
1601 "port=%d\n", portid);
1602
1603 n_tx_queue = nb_lcores;
1604 if (n_tx_queue > dev_txq_num)
1605 n_tx_queue = dev_txq_num;
1606 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1607 nb_rx_queue, (unsigned)n_tx_queue );
1608 ret = rte_eth_dev_configure(portid, nb_rx_queue,
1609 (uint16_t)n_tx_queue, &port_conf);
1610 if (ret < 0)
1611 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1612 "err=%d, port=%d\n", ret, portid);
1613
1614 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1615 print_ethaddr(" Address:", &ports_eth_addr[portid]);
1616 printf(", ");
1617
1618 /* init memory */
1619 ret = init_mem(NB_MBUF);
1620 if (ret < 0)
1621 rte_exit(EXIT_FAILURE, "init_mem failed\n");
1622
1623 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1624 if (rte_lcore_is_enabled(lcore_id) == 0)
1625 continue;
1626
1627 /* Initialize TX buffers */
1628 qconf = &lcore_conf[lcore_id];
1629 qconf->tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
1630 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
1631 rte_eth_dev_socket_id(portid));
1632 if (qconf->tx_buffer[portid] == NULL)
1633 rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
1634 (unsigned) portid);
1635
1636 rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
1637 }
1638
1639 /* init one TX queue per couple (lcore,port) */
1640 queueid = 0;
1641 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1642 if (rte_lcore_is_enabled(lcore_id) == 0)
1643 continue;
1644
1645 if (queueid >= dev_txq_num)
1646 continue;
1647
1648 if (numa_on)
1649 socketid = \
1650 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1651 else
1652 socketid = 0;
1653
1654 printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1655 fflush(stdout);
1656
1657 rte_eth_dev_info_get(portid, &dev_info);
1658 txconf = &dev_info.default_txconf;
1659 if (port_conf.rxmode.jumbo_frame)
1660 txconf->txq_flags = 0;
1661 ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1662 socketid, txconf);
1663 if (ret < 0)
1664 rte_exit(EXIT_FAILURE,
1665 "rte_eth_tx_queue_setup: err=%d, "
1666 "port=%d\n", ret, portid);
1667
1668 qconf = &lcore_conf[lcore_id];
1669 qconf->tx_queue_id[portid] = queueid;
1670 queueid++;
1671
1672 qconf->tx_port_id[qconf->n_tx_port] = portid;
1673 qconf->n_tx_port++;
1674 }
1675 printf("\n");
1676 }
1677
1678 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1679 if (rte_lcore_is_enabled(lcore_id) == 0)
1680 continue;
1681
1682 /* init power management library */
1683 ret = rte_power_init(lcore_id);
1684 if (ret)
1685 RTE_LOG(ERR, POWER,
1686 "Library initialization failed on core %u\n", lcore_id);
1687
1688 /* init timer structures for each enabled lcore */
1689 rte_timer_init(&power_timers[lcore_id]);
1690 hz = rte_get_timer_hz();
1691 rte_timer_reset(&power_timers[lcore_id],
1692 hz/TIMER_NUMBER_PER_SECOND, SINGLE, lcore_id,
1693 power_timer_cb, NULL);
1694
1695 qconf = &lcore_conf[lcore_id];
1696 printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1697 fflush(stdout);
1698 /* init RX queues */
1699 for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1700 portid = qconf->rx_queue_list[queue].port_id;
1701 queueid = qconf->rx_queue_list[queue].queue_id;
1702
1703 if (numa_on)
1704 socketid = \
1705 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1706 else
1707 socketid = 0;
1708
1709 printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1710 fflush(stdout);
1711
1712 ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
1713 socketid, NULL,
1714 pktmbuf_pool[socketid]);
1715 if (ret < 0)
1716 rte_exit(EXIT_FAILURE,
1717 "rte_eth_rx_queue_setup: err=%d, "
1718 "port=%d\n", ret, portid);
1719 }
1720 }
1721
1722 printf("\n");
1723
1724 /* start ports */
1725 for (portid = 0; portid < nb_ports; portid++) {
1726 if ((enabled_port_mask & (1 << portid)) == 0) {
1727 continue;
1728 }
1729 /* Start device */
1730 ret = rte_eth_dev_start(portid);
1731 if (ret < 0)
1732 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, "
1733 "port=%d\n", ret, portid);
1734 /*
1735 * If enabled, put device in promiscuous mode.
1736 * This allows IO forwarding mode to forward packets
1737 * to itself through 2 cross-connected ports of the
1738 * target machine.
1739 */
1740 if (promiscuous_on)
1741 rte_eth_promiscuous_enable(portid);
1742 /* initialize spinlock for each port */
1743 rte_spinlock_init(&(locks[portid]));
1744 }
1745
1746 check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
1747
1748 /* launch per-lcore init on every lcore */
1749 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1750 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1751 if (rte_eal_wait_lcore(lcore_id) < 0)
1752 return -1;
1753 }
1754
1755 return 0;
1756 }