]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include <stdio.h> | |
35 | #include <stdlib.h> | |
36 | #include <stdint.h> | |
37 | #include <inttypes.h> | |
38 | #include <sys/types.h> | |
39 | #include <string.h> | |
40 | #include <sys/queue.h> | |
41 | #include <stdarg.h> | |
42 | #include <errno.h> | |
43 | #include <getopt.h> | |
44 | ||
45 | #include <rte_common.h> | |
46 | #include <rte_byteorder.h> | |
47 | #include <rte_log.h> | |
48 | #include <rte_memory.h> | |
49 | #include <rte_memzone.h> | |
50 | #include <rte_eal.h> | |
51 | #include <rte_per_lcore.h> | |
52 | #include <rte_launch.h> | |
53 | #include <rte_atomic.h> | |
54 | #include <rte_cycles.h> | |
55 | #include <rte_prefetch.h> | |
56 | #include <rte_lcore.h> | |
57 | #include <rte_per_lcore.h> | |
58 | #include <rte_branch_prediction.h> | |
59 | #include <rte_interrupts.h> | |
60 | #include <rte_pci.h> | |
61 | #include <rte_random.h> | |
62 | #include <rte_debug.h> | |
63 | #include <rte_ether.h> | |
64 | #include <rte_ethdev.h> | |
65 | #include <rte_mempool.h> | |
66 | #include <rte_mbuf.h> | |
67 | #include <rte_ip.h> | |
68 | #include <rte_string_fns.h> | |
69 | ||
70 | #include "crypto.h" | |
71 | ||
/* Number of mbufs in each per-NUMA-socket pool (see init_mem()). */
#define NB_MBUF (32 * 1024)

/* Packets moved per RX/TX burst. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* tx_mask value with every port flagged for flushing. */
#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
/* Main-loop iterations between TSC reads (keeps rdtsc off the fast path). */
#define TSC_COUNT_LIMIT 1000

/*
 * Action bits tested against the packet's IPv4 source address
 * (big-endian) in main_loop() — presumably set by the traffic
 * generator; verify against the test setup.
 */
#define ACTION_ENCRYPT 1
#define ACTION_DECRYPT 2

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports (bit N = port N), set by -p on the command line */
static unsigned enabled_port_mask = 0;
static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. */

/* list of enabled ports: dst_ports[rx_port] is the paired TX port */
static uint32_t dst_ports[RTE_MAX_ETHPORTS];
100 | ||
/* Fixed-size burst buffer: len counts the valid entries in m_table. */
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* One NIC RX queue (port, queue) polled by an lcore. */
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
};

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
/* One --config tuple: RX queue (port_id, queue_id) serviced by lcore_id. */
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
};

/* Storage filled by parse_config() when --config is given. */
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/* Default mapping used when no --config option is supplied. */
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
136 | ||
/* Default ethernet device configuration: RSS on IP, plain TX. */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.split_hdr_size = 0,
		.header_split = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc = 0, /**< CRC stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
157 | ||
/* One mbuf pool per NUMA socket, created lazily in init_mem(). */
static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];

/* Per-lcore run-time state; cache-aligned to avoid false sharing. */
struct lcore_conf {
	uint64_t tsc;            /* TSC stamp of the last TX drain */
	uint64_t tsc_count;      /* loop iterations since last rdtsc read */
	uint32_t tx_mask;        /* bit N set => port N considered by flush */
	uint16_t n_rx_queue;     /* RX queues polled by this lcore */
	uint16_t rx_queue_list_pos;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; /* TX queue used per port */
	struct mbuf_table rx_mbuf;   /* current RX burst being drained */
	uint32_t rx_mbuf_pos;        /* next packet to hand out from rx_mbuf */
	uint32_t rx_curr_queue;      /* round-robin cursor over rx_queue_list */
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; /* per-port TX buffers */
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
175 | ||
/*
 * Hand out the next received packet for this lcore, one at a time.
 *
 * Packets are served from the cached burst in qconf->rx_mbuf; when the
 * cache is exhausted, the lcore's RX queues are polled round-robin
 * (at most one full cycle) until one yields a burst.
 *
 * Returns the next mbuf, or NULL when no RX queue currently has packets.
 */
static inline struct rte_mbuf *
nic_rx_get_packet(struct lcore_conf *qconf)
{
	struct rte_mbuf *pkt;

	if (unlikely(qconf->n_rx_queue == 0))
		return NULL;

	/* Look for the next queue with packets; return if none */
	if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
		uint32_t i;

		qconf->rx_mbuf_pos = 0;
		for (i = 0; i < qconf->n_rx_queue; i++) {
			qconf->rx_mbuf.len = rte_eth_rx_burst(
				qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
				qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
				qconf->rx_mbuf.m_table, MAX_PKT_BURST);

			/* Advance the round-robin cursor (wraps at the end)
			 * even on an empty burst, so polling is fair. */
			qconf->rx_curr_queue++;
			if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
				qconf->rx_curr_queue = 0;
			if (likely(qconf->rx_mbuf.len > 0))
				break;
		}
		/* All queues were empty this cycle. */
		if (unlikely(i == qconf->n_rx_queue))
			return NULL;
	}

	/* Get the next packet from the current queue; if last packet, go to next queue */
	pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
	qconf->rx_mbuf_pos++;

	return pkt;
}
211 | ||
212 | static inline void | |
213 | nic_tx_flush_queues(struct lcore_conf *qconf) | |
214 | { | |
215 | uint8_t portid; | |
216 | ||
217 | for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { | |
218 | struct rte_mbuf **m_table = NULL; | |
219 | uint16_t queueid, len; | |
220 | uint32_t n, i; | |
221 | ||
222 | if (likely((qconf->tx_mask & (1 << portid)) == 0)) | |
223 | continue; | |
224 | ||
225 | len = qconf->tx_mbufs[portid].len; | |
226 | if (likely(len == 0)) | |
227 | continue; | |
228 | ||
229 | queueid = qconf->tx_queue_id[portid]; | |
230 | m_table = qconf->tx_mbufs[portid].m_table; | |
231 | ||
232 | n = rte_eth_tx_burst(portid, queueid, m_table, len); | |
233 | for (i = n; i < len; i++){ | |
234 | rte_pktmbuf_free(m_table[i]); | |
235 | } | |
236 | ||
237 | qconf->tx_mbufs[portid].len = 0; | |
238 | } | |
239 | ||
240 | qconf->tx_mask = TX_QUEUE_FLUSH_MASK; | |
241 | } | |
242 | ||
243 | static inline void | |
244 | nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port) | |
245 | { | |
246 | struct lcore_conf *qconf; | |
247 | uint32_t lcoreid; | |
248 | uint16_t len; | |
249 | ||
250 | if (unlikely(pkt == NULL)) { | |
251 | return; | |
252 | } | |
253 | ||
254 | lcoreid = rte_lcore_id(); | |
255 | qconf = &lcore_conf[lcoreid]; | |
256 | ||
257 | len = qconf->tx_mbufs[port].len; | |
258 | qconf->tx_mbufs[port].m_table[len] = pkt; | |
259 | len++; | |
260 | ||
261 | /* enough pkts to be sent */ | |
262 | if (unlikely(len == MAX_PKT_BURST)) { | |
263 | uint32_t n, i; | |
264 | uint16_t queueid; | |
265 | ||
266 | queueid = qconf->tx_queue_id[port]; | |
267 | n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST); | |
268 | for (i = n; i < MAX_PKT_BURST; i++){ | |
269 | rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]); | |
270 | } | |
271 | ||
272 | qconf->tx_mask &= ~(1 << port); | |
273 | len = 0; | |
274 | } | |
275 | ||
276 | qconf->tx_mbufs[port].len = len; | |
277 | } | |
278 | ||
/*
 * Main per-lcore processing loop (never returns).
 *
 * Each iteration: (1) periodically drain buffered NIC/crypto TX queues,
 * (2) prefer a completed crypto response over fresh NIC RX, (3) route
 * new RX packets to QAT encrypt/decrypt based on bits in the IPv4
 * source address, (4) forward everything else to the paired port.
 */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcoreid;
	struct lcore_conf *qconf;
	/* TSC ticks in ~BURST_TX_DRAIN_US microseconds (rounded up). */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	printf("Thread %u starting...\n", lcoreid);

	for (;;) {
		struct rte_mbuf *pkt;
		uint32_t pkt_from_nic_rx = 0;
		uint8_t port;

		/* Flush TX queues: read the TSC only every TSC_COUNT_LIMIT
		 * iterations to keep rdtsc overhead off the fast path. */
		qconf->tsc_count++;
		if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
			uint64_t tsc, diff_tsc;

			tsc = rte_rdtsc();

			diff_tsc = tsc - qconf->tsc;
			if (unlikely(diff_tsc > drain_tsc)) {
				nic_tx_flush_queues(qconf);
				crypto_flush_tx_queue(lcoreid);
				qconf->tsc = tsc;
			}

			qconf->tsc_count = 0;
		}

		/*
		 * Check the Intel QuickAssist queues first: completed
		 * crypto operations take priority over fresh NIC RX.
		 */
		pkt = (struct rte_mbuf *) crypto_get_next_response();
		if (pkt == NULL) {
			pkt = nic_rx_get_packet(qconf);
			pkt_from_nic_rx = 1;
		}
		if (pkt == NULL)
			continue;
		/* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
		if (pkt_from_nic_rx) {
			struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(pkt,
					struct ipv4_hdr *,
					sizeof(struct ether_hdr));
			/* Action and algorithm selectors are encoded in the
			 * IPv4 source address: bits 16-23 pick the cipher,
			 * bits 8-15 the hash — presumably set by the traffic
			 * generator; confirm against the test setup. */
			if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
				/* On submit failure the mbuf is freed here;
				 * on success QAT owns it until completion. */
				if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
				if(CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}
		}

		/* Crypto responses and pass-through packets are forwarded
		 * to the port paired with their arrival port. */
		port = dst_ports[pkt->port];

		/* Transmit the packet */
		nic_tx_send_packet(pkt, (uint8_t)port);
	}
}
353 | ||
354 | static inline unsigned | |
355 | get_port_max_rx_queues(uint8_t port_id) | |
356 | { | |
357 | struct rte_eth_dev_info dev_info; | |
358 | ||
359 | rte_eth_dev_info_get(port_id, &dev_info); | |
360 | return dev_info.max_rx_queues; | |
361 | } | |
362 | ||
363 | static inline unsigned | |
364 | get_port_max_tx_queues(uint8_t port_id) | |
365 | { | |
366 | struct rte_eth_dev_info dev_info; | |
367 | ||
368 | rte_eth_dev_info_get(port_id, &dev_info); | |
369 | return dev_info.max_tx_queues; | |
370 | } | |
371 | ||
372 | static int | |
373 | check_lcore_params(void) | |
374 | { | |
375 | uint16_t i; | |
376 | ||
377 | for (i = 0; i < nb_lcore_params; ++i) { | |
378 | if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) { | |
379 | printf("invalid queue number: %hhu\n", lcore_params[i].queue_id); | |
380 | return -1; | |
381 | } | |
382 | if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) { | |
383 | printf("error: lcore %hhu is not enabled in lcore mask\n", | |
384 | lcore_params[i].lcore_id); | |
385 | return -1; | |
386 | } | |
387 | } | |
388 | return 0; | |
389 | } | |
390 | ||
391 | static int | |
392 | check_port_config(const unsigned nb_ports) | |
393 | { | |
394 | unsigned portid; | |
395 | uint16_t i; | |
396 | ||
397 | for (i = 0; i < nb_lcore_params; ++i) { | |
398 | portid = lcore_params[i].port_id; | |
399 | if ((enabled_port_mask & (1 << portid)) == 0) { | |
400 | printf("port %u is not enabled in port mask\n", portid); | |
401 | return -1; | |
402 | } | |
403 | if (portid >= nb_ports) { | |
404 | printf("port %u is not present on the board\n", portid); | |
405 | return -1; | |
406 | } | |
407 | } | |
408 | return 0; | |
409 | } | |
410 | ||
411 | static uint8_t | |
412 | get_port_n_rx_queues(const uint8_t port) | |
413 | { | |
414 | int queue = -1; | |
415 | uint16_t i; | |
416 | ||
417 | for (i = 0; i < nb_lcore_params; ++i) { | |
418 | if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue) | |
419 | queue = lcore_params[i].queue_id; | |
420 | } | |
421 | return (uint8_t)(++queue); | |
422 | } | |
423 | ||
424 | static int | |
425 | init_lcore_rx_queues(void) | |
426 | { | |
427 | uint16_t i, nb_rx_queue; | |
428 | uint8_t lcore; | |
429 | ||
430 | for (i = 0; i < nb_lcore_params; ++i) { | |
431 | lcore = lcore_params[i].lcore_id; | |
432 | nb_rx_queue = lcore_conf[lcore].n_rx_queue; | |
433 | if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { | |
434 | printf("error: too many queues (%u) for lcore: %u\n", | |
435 | (unsigned)nb_rx_queue + 1, (unsigned)lcore); | |
436 | return -1; | |
437 | } | |
438 | lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = | |
439 | lcore_params[i].port_id; | |
440 | lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = | |
441 | lcore_params[i].queue_id; | |
442 | lcore_conf[lcore].n_rx_queue++; | |
443 | } | |
444 | return 0; | |
445 | } | |
446 | ||
/* display usage: print command-line help for the application options. */
static void
print_usage(const char *prgname)
{
	printf ("%s [EAL options] -- -p PORTMASK [--no-promisc]"
		" [--config '(port,queue,lcore)[,(port,queue,lcore)]'\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" --no-promisc: disable promiscuous mode (default is ON)\n"
		" --config '(port,queue,lcore)': rx queues configuration\n",
		prgname);
}
458 | ||
/*
 * Parse a hexadecimal port-mask string.
 * Returns the parsed mask, or 0 when the string is empty or contains
 * anything other than hex digits (note 0 is also a legal "no ports"
 * mask, which main() rejects separately).
 */
static unsigned
parse_portmask(const char *portmask)
{
	char *endptr = NULL;
	unsigned long mask;

	/* parse hexadecimal string */
	mask = strtoul(portmask, &endptr, 16);
	if (portmask[0] == '\0' || endptr == NULL || *endptr != '\0')
		return 0;

	return (unsigned)mask;
}
472 | ||
/*
 * Parse the --config option: a list of "(port,queue,lcore)" tuples.
 * Fills lcore_params_array / nb_lcore_params and repoints lcore_params
 * at the array (replacing the defaults).
 * Returns 0 on success, -1 on any malformed input.
 */
static int
parse_config(const char *q_arg)
{
	char s[256];
	/* p scans for '(' from p_end, which trails at the previous ')'. */
	const char *p, *p_end = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	/* Discard any previously parsed configuration. */
	nb_lcore_params = 0;

	while ((p = strchr(p_end,'(')) != NULL) {
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		++p;
		if((p_end = strchr(p,')')) == NULL)
			return -1;

		/* Copy the tuple body (text between the parentheses) into s. */
		size = p_end - p;
		if(size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		/* Split on ',' — the tuple must have exactly three fields. */
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			/* Each field must be a valid number in 0..255. */
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
523 | ||
/* Parse the argument given in the command line of the application.
 * Recognizes -p PORTMASK (required), --config, and --no-promisc.
 * Returns the index of the first non-option argument (for the caller
 * to re-slice argv), or -1 on error. */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-promisc", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options: dispatch on the matched option's name */
		case 0:
			if (strcmp(lgopts[option_index].name, "config") == 0) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
				printf("Promiscuous mode disabled\n");
				promiscuous_on = 0;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	/* Restore prgname in front of the remaining arguments so the
	 * caller sees a conventional argv. */
	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
588 | ||
589 | static void | |
590 | print_ethaddr(const char *name, const struct ether_addr *eth_addr) | |
591 | { | |
592 | char buf[ETHER_ADDR_FMT_SIZE]; | |
593 | ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); | |
594 | printf("%s%s", name, buf); | |
595 | } | |
596 | ||
597 | static int | |
598 | init_mem(void) | |
599 | { | |
600 | int socketid; | |
601 | unsigned lcoreid; | |
602 | char s[64]; | |
603 | ||
604 | RTE_LCORE_FOREACH(lcoreid) { | |
605 | socketid = rte_lcore_to_socket_id(lcoreid); | |
606 | if (socketid >= RTE_MAX_NUMA_NODES) { | |
607 | printf("Socket %d of lcore %u is out of range %d\n", | |
608 | socketid, lcoreid, RTE_MAX_NUMA_NODES); | |
609 | return -1; | |
610 | } | |
611 | if (pktmbuf_pool[socketid] == NULL) { | |
612 | snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); | |
613 | pktmbuf_pool[socketid] = | |
614 | rte_pktmbuf_pool_create(s, NB_MBUF, 32, 0, | |
615 | RTE_MBUF_DEFAULT_BUF_SIZE, socketid); | |
616 | if (pktmbuf_pool[socketid] == NULL) { | |
617 | printf("Cannot init mbuf pool on socket %d\n", socketid); | |
618 | return -1; | |
619 | } | |
620 | printf("Allocated mbuf pool on socket %d\n", socketid); | |
621 | } | |
622 | } | |
623 | return 0; | |
624 | } | |
625 | ||
626 | int | |
627 | main(int argc, char **argv) | |
628 | { | |
629 | struct lcore_conf *qconf; | |
630 | struct rte_eth_link link; | |
631 | int ret; | |
632 | unsigned nb_ports; | |
633 | uint16_t queueid; | |
634 | unsigned lcoreid; | |
635 | uint32_t nb_tx_queue; | |
636 | uint8_t portid, nb_rx_queue, queue, socketid, last_port; | |
637 | unsigned nb_ports_in_mask = 0; | |
638 | ||
639 | /* init EAL */ | |
640 | ret = rte_eal_init(argc, argv); | |
641 | if (ret < 0) | |
642 | return -1; | |
643 | argc -= ret; | |
644 | argv += ret; | |
645 | ||
646 | /* parse application arguments (after the EAL ones) */ | |
647 | ret = parse_args(argc, argv); | |
648 | if (ret < 0) | |
649 | return -1; | |
650 | ||
651 | if (check_lcore_params() < 0) | |
652 | rte_panic("check_lcore_params failed\n"); | |
653 | ||
654 | ret = init_lcore_rx_queues(); | |
655 | if (ret < 0) | |
656 | return -1; | |
657 | ||
658 | ret = init_mem(); | |
659 | if (ret < 0) | |
660 | return -1; | |
661 | ||
662 | nb_ports = rte_eth_dev_count(); | |
663 | ||
664 | if (check_port_config(nb_ports) < 0) | |
665 | rte_panic("check_port_config failed\n"); | |
666 | ||
667 | /* reset dst_ports */ | |
668 | for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) | |
669 | dst_ports[portid] = 0; | |
670 | last_port = 0; | |
671 | ||
672 | /* | |
673 | * Each logical core is assigned a dedicated TX queue on each port. | |
674 | */ | |
675 | for (portid = 0; portid < nb_ports; portid++) { | |
676 | /* skip ports that are not enabled */ | |
677 | if ((enabled_port_mask & (1 << portid)) == 0) | |
678 | continue; | |
679 | ||
680 | if (nb_ports_in_mask % 2) { | |
681 | dst_ports[portid] = last_port; | |
682 | dst_ports[last_port] = portid; | |
683 | } | |
684 | else | |
685 | last_port = portid; | |
686 | ||
687 | nb_ports_in_mask++; | |
688 | } | |
689 | if (nb_ports_in_mask % 2) { | |
690 | printf("Notice: odd number of ports in portmask.\n"); | |
691 | dst_ports[last_port] = last_port; | |
692 | } | |
693 | ||
694 | /* initialize all ports */ | |
695 | for (portid = 0; portid < nb_ports; portid++) { | |
696 | /* skip ports that are not enabled */ | |
697 | if ((enabled_port_mask & (1 << portid)) == 0) { | |
698 | printf("\nSkipping disabled port %d\n", portid); | |
699 | continue; | |
700 | } | |
701 | ||
702 | /* init port */ | |
703 | printf("Initializing port %d ... ", portid ); | |
704 | fflush(stdout); | |
705 | ||
706 | nb_rx_queue = get_port_n_rx_queues(portid); | |
707 | if (nb_rx_queue > get_port_max_rx_queues(portid)) | |
708 | rte_panic("Number of rx queues %d exceeds max number of rx queues %u" | |
709 | " for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid), | |
710 | portid); | |
711 | nb_tx_queue = rte_lcore_count(); | |
712 | if (nb_tx_queue > get_port_max_tx_queues(portid)) | |
713 | rte_panic("Number of lcores %u exceeds max number of tx queues %u" | |
714 | " for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid), | |
715 | portid); | |
716 | printf("Creating queues: nb_rxq=%d nb_txq=%u... ", | |
717 | nb_rx_queue, (unsigned)nb_tx_queue ); | |
718 | ret = rte_eth_dev_configure(portid, nb_rx_queue, | |
719 | (uint16_t)nb_tx_queue, &port_conf); | |
720 | if (ret < 0) | |
721 | rte_panic("Cannot configure device: err=%d, port=%d\n", | |
722 | ret, portid); | |
723 | ||
724 | rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); | |
725 | print_ethaddr(" Address:", &ports_eth_addr[portid]); | |
726 | printf(", "); | |
727 | ||
728 | /* init one TX queue per couple (lcore,port) */ | |
729 | queueid = 0; | |
730 | RTE_LCORE_FOREACH(lcoreid) { | |
731 | socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid); | |
732 | printf("txq=%u,%d,%d ", lcoreid, queueid, socketid); | |
733 | fflush(stdout); | |
734 | ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, | |
735 | socketid, | |
736 | NULL); | |
737 | if (ret < 0) | |
738 | rte_panic("rte_eth_tx_queue_setup: err=%d, " | |
739 | "port=%d\n", ret, portid); | |
740 | ||
741 | qconf = &lcore_conf[lcoreid]; | |
742 | qconf->tx_queue_id[portid] = queueid; | |
743 | queueid++; | |
744 | } | |
745 | printf("\n"); | |
746 | } | |
747 | ||
748 | RTE_LCORE_FOREACH(lcoreid) { | |
749 | qconf = &lcore_conf[lcoreid]; | |
750 | printf("\nInitializing rx queues on lcore %u ... ", lcoreid ); | |
751 | fflush(stdout); | |
752 | /* init RX queues */ | |
753 | for(queue = 0; queue < qconf->n_rx_queue; ++queue) { | |
754 | portid = qconf->rx_queue_list[queue].port_id; | |
755 | queueid = qconf->rx_queue_list[queue].queue_id; | |
756 | socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid); | |
757 | printf("rxq=%d,%d,%d ", portid, queueid, socketid); | |
758 | fflush(stdout); | |
759 | ||
760 | ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, | |
761 | socketid, | |
762 | NULL, | |
763 | pktmbuf_pool[socketid]); | |
764 | if (ret < 0) | |
765 | rte_panic("rte_eth_rx_queue_setup: err=%d," | |
766 | "port=%d\n", ret, portid); | |
767 | } | |
768 | } | |
769 | ||
770 | printf("\n"); | |
771 | ||
772 | /* start ports */ | |
773 | for (portid = 0; portid < nb_ports; portid++) { | |
774 | if ((enabled_port_mask & (1 << portid)) == 0) | |
775 | continue; | |
776 | /* Start device */ | |
777 | ret = rte_eth_dev_start(portid); | |
778 | if (ret < 0) | |
779 | rte_panic("rte_eth_dev_start: err=%d, port=%d\n", | |
780 | ret, portid); | |
781 | ||
782 | printf("done: Port %d ", portid); | |
783 | ||
784 | /* get link status */ | |
785 | rte_eth_link_get(portid, &link); | |
786 | if (link.link_status) | |
787 | printf(" Link Up - speed %u Mbps - %s\n", | |
788 | (unsigned) link.link_speed, | |
789 | (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? | |
790 | ("full-duplex") : ("half-duplex\n")); | |
791 | else | |
792 | printf(" Link Down\n"); | |
793 | /* | |
794 | * If enabled, put device in promiscuous mode. | |
795 | * This allows IO forwarding mode to forward packets | |
796 | * to itself through 2 cross-connected ports of the | |
797 | * target machine. | |
798 | */ | |
799 | if (promiscuous_on) | |
800 | rte_eth_promiscuous_enable(portid); | |
801 | } | |
802 | printf("Crypto: Initializing Crypto...\n"); | |
803 | if (crypto_init() != 0) | |
804 | return -1; | |
805 | ||
806 | RTE_LCORE_FOREACH(lcoreid) { | |
807 | if (per_core_crypto_init(lcoreid) != 0) { | |
808 | printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid); | |
809 | return -1; | |
810 | } | |
811 | } | |
812 | printf("Crypto: Initialization complete\n"); | |
813 | /* launch per-lcore init on every lcore */ | |
814 | rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); | |
815 | RTE_LCORE_FOREACH_SLAVE(lcoreid) { | |
816 | if (rte_eal_wait_lcore(lcoreid) < 0) | |
817 | return -1; | |
818 | } | |
819 | ||
820 | return 0; | |
821 | } |