/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};

#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"

RTE_INIT(enicpmd_init_log)
{
	enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
	if (enicpmd_logtype_init >= 0)
		rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
	enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
	if (enicpmd_logtype_flow >= 0)
		rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}

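/*
 * Handler for the legacy flow director (fdir) filter API: dispatches
 * add/update, delete, stats and info requests to the enic fdir helpers.
 * Flushing all filters is not supported.
 */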
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

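/*
 * Top-level filter control entry point. For the generic (rte_flow) filter
 * type it hands back the enic_flow_ops table; fdir requests are forwarded
 * to enicpmd_fdir_ctrl_func(); all other filter types are rejected.
 */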
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

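/*
 * Allocate interrupt resources and finish vNIC setup, but only once every
 * completion queue, work queue and start-of-packet (SOP) receive queue has
 * been configured. Until then the function returns 0, so the last queue
 * setup call performs the final initialization.
 */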
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

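/*
 * Report how many received packets are pending on an Rx queue by comparing
 * the hardware completion queue tail with the next index to be cleaned,
 * accounting for ring wrap-around.
 */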
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	if ((mask & ETH_VLAN_FILTER_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if ((mask & ETH_VLAN_EXTEND_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return enic_set_vlan_strip(enic);
}

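/*
 * dev_configure: apply the resource counts and offload settings chosen by
 * the application, then program VLAN stripping and the initial RSS state.
 */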
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	enic->mc_count = 0;
	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	/* All vlan offload masks to apply the current settings */
	mask = ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If the user key is
	 * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);
}

/*
 * Close device: release the resources held by the port.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
}

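/*
 * Two packet-type lists are advertised: the overlay list adds tunnel and
 * inner-header types and is returned only when overlay offload is enabled
 * and a real receive handler is installed.
 */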
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct ether_addr *addr, bool add)
{
	char mac_str[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
	PMD_INIT_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

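/*
 * Replace the multicast address list. The current and requested lists are
 * diffed and only the differences are sent to the NIC, since each devcmd
 * round trip is slow.
 */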
static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[ETHER_ADDR_FMT_SIZE];
	struct ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!is_multicast_ether_addr(addr) ||
		    is_broadcast_ether_addr(addr)) {
			ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
			PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		PMD_INIT_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		PMD_INIT_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

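/*
 * RSS redirection table (RETA) handlers. Entries are stored 4 per rss_cpu
 * word, and queue numbers are translated between DPDK Rx queue indices and
 * the enic start-of-packet (SOP) RQ indices.
 */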
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64 *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64 *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

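/*
 * UDP tunnel port handlers. VXLAN is the only supported tunnel type and it
 * requires overlay offload; the NIC tracks a single configurable VXLAN port.
 */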
static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
			     "supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
				      char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (fw_version == NULL || fw_size <= 0)
		return -EINVAL;
	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	fw_version[fw_size - 1] = '\0';
	return 0;
}

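/*
 * eth_dev ops table registered with ethdev. Entries set to NULL are
 * features this PMD does not implement.
 */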
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = NULL,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get = enicpmd_dev_rxq_info_get,
	.txq_info_get = enicpmd_dev_txq_info_get,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.mac_addr_set = enicpmd_set_mac_addr,
	.set_mc_addr_list = enicpmd_set_mc_addr_list,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
	.reta_query = enicpmd_dev_rss_reta_query,
	.reta_update = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get = enicpmd_dev_fw_version_get,
};

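/*
 * kvargs handlers for the devargs registered below: enic_parse_zero_one()
 * parses the boolean (0/1) flags and enic_parse_ig_vlan_rewrite() parses
 * the ingress VLAN rewrite mode.
 */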
static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

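/*
 * Parse the device arguments: set the defaults first, then let
 * rte_kvargs_process() invoke the handlers above for any keys present.
 */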
static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");