/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

/* DPDK headers don't like -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
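
/*
 * Usage note added for clarity (not part of the original file): these keys
 * are consumed as comma-separated device arguments appended to a PCI device
 * specification on the EAL command line, for example (hypothetical address
 * and values):
 *
 *   testpmd -w 0000:05:00.0,rxq_cqe_comp_en=1,txq_inline=128,txqs_min_inline=4,txq_mpw_en=1 -- -i
 *
 * Each key=value pair ends up in rte_devargs and is handled by mlx5_args()
 * and mlx5_args_check() below through the rte_kvargs API.
 */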
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
        const char *val = getenv(name);
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
        struct priv *priv = mlx5_get_priv(dev);
        unsigned int i;
121 DEBUG("%p: closing device \"%s\"",
123 ((priv
->ctx
!= NULL
) ? priv
->ctx
->device
->name
: ""));
        /* In case mlx5_dev_stop() has not been called. */
        priv_dev_interrupt_handler_uninstall(priv, dev);
        priv_special_flow_disable_all(priv);
        priv_mac_addrs_disable(priv);
        priv_destroy_hash_rxqs(priv);

        /* Remove flow director elements. */
        priv_fdir_disable(priv);
        priv_fdir_delete_filters_list(priv);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                for (i = 0; (i != priv->rxqs_n); ++i) {
                        struct rxq *rxq = (*priv->rxqs)[i];
                        struct rxq_ctrl *rxq_ctrl;

                        rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
                        (*priv->rxqs)[i] = NULL;
                        rxq_cleanup(rxq_ctrl);
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                for (i = 0; (i != priv->txqs_n); ++i) {
                        struct txq *txq = (*priv->txqs)[i];
                        struct txq_ctrl *txq_ctrl;

                        txq_ctrl = container_of(txq, struct txq_ctrl, txq);
                        (*priv->txqs)[i] = NULL;
                        txq_cleanup(txq_ctrl);
        if (priv->pd != NULL) {
                assert(priv->ctx != NULL);
                claim_zero(ibv_dealloc_pd(priv->pd));
                claim_zero(ibv_close_device(priv->ctx));
        } else
                assert(priv->ctx == NULL);
        if (priv->rss_conf != NULL) {
                for (i = 0; (i != hash_rxq_init_n); ++i)
                        rte_free((*priv->rss_conf)[i]);
                rte_free(priv->rss_conf);
        }
        if (priv->reta_idx != NULL)
                rte_free(priv->reta_idx);
        memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .dev_infos_get = mlx5_dev_infos_get,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .reta_update = mlx5_dev_rss_reta_update,
        .reta_query = mlx5_dev_rss_reta_query,
        .rss_hash_update = mlx5_rss_hash_update,
        .rss_hash_conf_get = mlx5_rss_hash_conf_get,
        .filter_ctrl = mlx5_dev_filter_ctrl,
};
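
/*
 * Note added for clarity (not part of the original file): applications never
 * call these functions directly; the generic ethdev layer dispatches to them,
 * e.g. a hypothetical application sequence such as
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);  ->  mlx5_dev_configure()
 *   rte_eth_dev_start(port_id);                   ->  mlx5_dev_start()
 *   rte_eth_dev_close(port_id);                   ->  mlx5_dev_close()
 *
 * reaches this driver through the ops table installed in mlx5_pci_probe().
 */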
static struct {
        struct rte_pci_addr pci_addr; /* associated PCI address */
        uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];
/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
        unsigned int i;
        int ret = -1;

        assert(pci_addr != NULL);
        for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
                if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
                    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
                    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
                    (mlx5_dev[i].pci_addr.function == pci_addr->function))
                        return i;
                if ((mlx5_dev[i].ports == 0) && (ret == -1))
                        ret = i;
        }
        return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
        struct priv *priv = opaque;
        unsigned long tmp;

        errno = 0;
        tmp = strtoul(val, NULL, 0);
        if (errno) {
                WARN("%s: \"%s\" is not a valid integer", key, val);
                return -errno;
        }
        if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
                priv->cqe_comp = !!tmp;
        } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
                priv->txq_inline = tmp;
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                priv->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                priv->mps = !!tmp;
        } else {
                WARN("%s: unknown parameter", key);
                return -EINVAL;
        }
        return 0;
}
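
/*
 * Illustrative example (not part of the original file): for the devargs
 * fragment "txq_inline=128", rte_kvargs_process() calls
 * mlx5_args_check("txq_inline", "128", priv), which stores 128 in
 * priv->txq_inline; boolean knobs such as "rxq_cqe_comp_en" are normalized
 * with !!tmp, so any non-zero value enables the feature.
 */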
/**
 * Parse device parameters.
 *
 * @param priv
 *   Pointer to private structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args(struct priv *priv, struct rte_devargs *devargs)
{
        const char **params = (const char *[]){
                MLX5_RXQ_CQE_COMP_EN,
                MLX5_TXQ_INLINE,
                MLX5_TXQS_MIN_INLINE,
                MLX5_TXQ_MPW_EN,
                NULL,
        };
        struct rte_kvargs *kvlist;
        int ret = 0;
        int i;

        /* Following UGLY cast is done to pass checkpatch. */
        kvlist = rte_kvargs_parse(devargs->args, params);
        /* Process parameters. */
        for (i = 0; (params[i] != NULL); ++i) {
                if (rte_kvargs_count(kvlist, params[i])) {
                        ret = rte_kvargs_process(kvlist, params[i],
                                                 mlx5_args_check, priv);
                        if (ret != 0)
                                return ret;
                }
        }
        rte_kvargs_free(kvlist);
        return 0;
}

static struct eth_driver mlx5_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        struct ibv_device **list;
        struct ibv_device *ibv_dev;
        int err = 0;
        struct ibv_context *attr_ctx = NULL;
        struct ibv_device_attr device_attr;
        unsigned int sriov;
        unsigned int mps;
        int idx;
        int i;

        assert(pci_drv == &mlx5_driver.pci_drv);
        /* Get mlx5_dev[] index. */
        idx = mlx5_dev_idx(&pci_dev->addr);
        if (idx == -1) {
                ERROR("this driver cannot support any more adapters");
                return -ENOMEM;
        }
        DEBUG("using driver device index %d", idx);

        /* Save PCI address. */
        mlx5_dev[idx].pci_addr = pci_dev->addr;
        list = ibv_get_device_list(&i);
        if (errno == ENOSYS) {
                WARN("cannot list devices, is ib_uverbs loaded?");
                return 0;
        }
        /*
         * For each listed device, check related sysfs entry against
         * the provided PCI ID.
         */
        struct rte_pci_addr pci_addr;

        DEBUG("checking device \"%s\"", list[i]->name);
        if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
                continue;
        if ((pci_dev->addr.domain != pci_addr.domain) ||
            (pci_dev->addr.bus != pci_addr.bus) ||
            (pci_dev->addr.devid != pci_addr.devid) ||
            (pci_dev->addr.function != pci_addr.function))
                continue;
        sriov = ((pci_dev->id.device_id ==
                  PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
                 (pci_dev->id.device_id ==
                  PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
        /* Multi-packet send is only supported by ConnectX-4 Lx PF. */
        mps = (pci_dev->id.device_id ==
               PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
415 INFO("PCI information matches, using device \"%s\""
416 " (SR-IOV: %s, MPS: %s)",
418 sriov
? "true" : "false",
419 mps
? "true" : "false");
420 attr_ctx
= ibv_open_device(list
[i
]);
424 if (attr_ctx
== NULL
) {
425 ibv_free_device_list(list
);
428 WARN("cannot access device, is mlx5_ib loaded?");
431 WARN("cannot use device, are drivers up to date?");
439 DEBUG("device opened");
440 if (ibv_query_device(attr_ctx
, &device_attr
))
442 INFO("%u port(s) detected", device_attr
.phys_port_cnt
);
        for (i = 0; i < device_attr.phys_port_cnt; i++) {
                uint32_t port = i + 1; /* ports are indexed from one */
                uint32_t test = (1 << i);
                struct ibv_context *ctx = NULL;
                struct ibv_port_attr port_attr;
                struct ibv_pd *pd = NULL;
                struct priv *priv = NULL;
                struct rte_eth_dev *eth_dev;
                struct ibv_exp_device_attr exp_device_attr;
                struct ether_addr mac;
                uint16_t num_vfs = 0;

                exp_device_attr.comp_mask =
                        IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
                        IBV_EXP_DEVICE_ATTR_RX_HASH |
                        IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
                        IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
                        0;

                DEBUG("using port %u (%08" PRIx32 ")", port, test);
                ctx = ibv_open_device(ibv_dev);

                /* Check port status. */
                err = ibv_query_port(ctx, port, &port_attr);
                if (err) {
                        ERROR("port query failed: %s", strerror(err));
                        goto port_error;
                }
                if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
                        ERROR("port %d is not configured in Ethernet mode",
                              port);
                        goto port_error;
                }
                if (port_attr.state != IBV_PORT_ACTIVE)
                        DEBUG("port %d is not active: \"%s\" (%d)",
                              port, ibv_port_state_str(port_attr.state),
                              port_attr.state);
                /* Allocate protection domain. */
                pd = ibv_alloc_pd(ctx);
                if (pd == NULL) {
                        ERROR("PD allocation failure");
                        goto port_error;
                }

                mlx5_dev[idx].ports |= test;
                /* from rte_ethdev.c */
                priv = rte_zmalloc("ethdev private structure",
                                   sizeof(*priv),
                                   RTE_CACHE_LINE_SIZE);
                if (priv == NULL) {
                        ERROR("priv allocation failure");
                        goto port_error;
                }
                priv->device_attr = device_attr;
                priv->mtu = ETHER_MTU;
                priv->mps = mps; /* Enable MPW by default if supported. */
                priv->cqe_comp = 1; /* Enable compression by default. */
                err = mlx5_args(priv, pci_dev->device.devargs);
                if (err) {
                        ERROR("failed to process device arguments: %s",
                              strerror(err));
                        goto port_error;
                }
                if (ibv_exp_query_device(ctx, &exp_device_attr)) {
                        ERROR("ibv_exp_query_device() failed");
                        goto port_error;
                }
                priv->hw_csum =
                        ((exp_device_attr.exp_device_cap_flags &
                          IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
                         (exp_device_attr.exp_device_cap_flags &
                          IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
                DEBUG("checksum offloading is %ssupported",
                      (priv->hw_csum ? "" : "not "));
                priv->hw_csum_l2tun =
                        !!(exp_device_attr.exp_device_cap_flags &
                           IBV_EXP_DEVICE_VXLAN_SUPPORT);
                DEBUG("L2 tunnel checksum offloads are %ssupported",
                      (priv->hw_csum_l2tun ? "" : "not "));
                priv->ind_table_max_size =
                        exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
                /* Remove this check once DPDK supports larger/variable
                 * indirection tables. */
                if (priv->ind_table_max_size >
                    (unsigned int)RSS_INDIRECTION_TABLE_SIZE)
                        priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
                DEBUG("maximum RX indirection table size is %u",
                      priv->ind_table_max_size);
                priv->hw_vlan_strip =
                        !!(exp_device_attr.wq_vlan_offloads_cap &
                           IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
                DEBUG("VLAN stripping is %ssupported",
                      (priv->hw_vlan_strip ? "" : "not "));

                priv->hw_fcs_strip =
                        !!(exp_device_attr.exp_device_cap_flags &
                           IBV_EXP_DEVICE_SCATTER_FCS);
                DEBUG("FCS stripping configuration is %ssupported",
                      (priv->hw_fcs_strip ? "" : "not "));

                priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
                DEBUG("hardware RX end alignment padding is %ssupported",
                      (priv->hw_padding ? "" : "not "));
                priv_get_num_vfs(priv, &num_vfs);
                priv->sriov = (num_vfs || sriov);
                if (priv->mps && !mps) {
                        ERROR("multi-packet send not supported on this device"
                              " (" MLX5_TXQ_MPW_EN ")");
                        goto port_error;
                }
                /* Allocate and register default RSS hash keys. */
                priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
                                            sizeof((*priv->rss_conf)[0]), 0);
                if (priv->rss_conf == NULL) {
                        err = ENOMEM;
                        goto port_error;
                }
                err = rss_hash_rss_conf_new_key(priv,
                                                rss_hash_default_key,
                                                rss_hash_default_key_len,
                                                ETH_RSS_PROTO_MASK);
                if (err)
                        goto port_error;
                /* Configure the first MAC address by default. */
                if (priv_get_mac(priv, &mac.addr_bytes)) {
                        ERROR("cannot get MAC address, is mlx5_en loaded?"
                              " (errno: %s)", strerror(errno));
                        goto port_error;
                }
                INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
                     priv->port,
                     mac.addr_bytes[0], mac.addr_bytes[1],
                     mac.addr_bytes[2], mac.addr_bytes[3],
                     mac.addr_bytes[4], mac.addr_bytes[5]);
                /* Register MAC address. */
                claim_zero(priv_mac_addr_add(priv, 0,
                                             (const uint8_t (*)[ETHER_ADDR_LEN])
                                             mac.addr_bytes));
                /* Initialize FD filters list. */
                err = fdir_init_filters_list(priv);
                {
                        char ifname[IF_NAMESIZE];

                        if (priv_get_ifname(priv, &ifname) == 0)
                                DEBUG("port %u ifname is \"%s\"",
                                      priv->port, ifname);
                        else
                                DEBUG("port %u ifname is unknown", priv->port);
                }
                /* Get actual MTU if possible. */
                priv_get_mtu(priv, &priv->mtu);
                DEBUG("port %u MTU is %u", priv->port, priv->mtu);
                /* from rte_ethdev.c */
                {
                        char name[RTE_ETH_NAME_MAX_LEN];

                        snprintf(name, sizeof(name), "%s port %u",
                                 ibv_get_device_name(ibv_dev), port);
                        eth_dev = rte_eth_dev_allocate(name);
                }
                if (eth_dev == NULL) {
                        ERROR("can not allocate rte ethdev");
                        goto port_error;
                }
                /* Secondary processes have to use local storage for their
                 * private data as well as a copy of eth_dev->data, but this
                 * pointer must not be modified before burst functions are
                 * actually called. */
                if (mlx5_is_secondary()) {
                        struct mlx5_secondary_data *sd =
                                &mlx5_secondary_data[eth_dev->data->port_id];

                        sd->primary_priv = eth_dev->data->dev_private;
                        if (sd->primary_priv == NULL) {
                                ERROR("no private data for port %u",
                                      eth_dev->data->port_id);
                                goto port_error;
                        }
                        sd->shared_dev_data = eth_dev->data;
                        rte_spinlock_init(&sd->lock);
                        memcpy(sd->data.name, sd->shared_dev_data->name,
                               sizeof(sd->data.name));
                        sd->data.dev_private = priv;
                        sd->data.rx_mbuf_alloc_failed = 0;
                        sd->data.mtu = ETHER_MTU;
                        sd->data.port_id = sd->shared_dev_data->port_id;
                        sd->data.mac_addrs = priv->mac;
                        eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup;
                        eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
                } else {
                        eth_dev->data->dev_private = priv;
                        eth_dev->data->rx_mbuf_alloc_failed = 0;
                        eth_dev->data->mtu = ETHER_MTU;
                        eth_dev->data->mac_addrs = priv->mac;
                }

                eth_dev->pci_dev = pci_dev;
                rte_eth_copy_pci_info(eth_dev, pci_dev);
                eth_dev->driver = &mlx5_driver;
                eth_dev->dev_ops = &mlx5_dev_ops;
                TAILQ_INIT(&eth_dev->link_intr_cbs);
                /* Bring Ethernet device up. */
                DEBUG("forcing Ethernet interface up");
                priv_set_flags(priv, ~IFF_UP, IFF_UP);
                mlx5_link_update_unlocked(priv->dev, 1);
                continue;

port_error:
                rte_free(priv->rss_conf);
                claim_zero(ibv_dealloc_pd(pd));
                claim_zero(ibv_close_device(ctx));
        }
        /*
         * XXX if something went wrong in the loop above, there is a resource
         * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
         * long as the dpdk does not provide a way to deallocate an ethdev and
         * a way to enumerate the registered ethdevs to free the previous ones.
         */
        /* no port found, complain */
        if (!mlx5_dev[idx].ports) {
                err = ENODEV;
                goto error;
        }

error:
        claim_zero(ibv_close_device(attr_ctx));
        ibv_free_device_list(list);
        return -err;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX4)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
                               PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
        },
        {
                .vendor_id = 0
        }
};
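
/*
 * Note added for clarity (not part of the original file): during
 * rte_eal_init() the EAL scans the PCI bus and compares each device's
 * vendor/device ID pair against this table; on a match it calls
 * mlx5_pci_probe() through the mlx5_driver structure registered below.
 */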
static struct eth_driver mlx5_driver = {
        .pci_drv = {
                .driver = {
                        .name = MLX5_DRIVER_NAME
                },
                .id_table = mlx5_pci_id_map,
                .probe = mlx5_pci_probe,
                .drv_flags = RTE_PCI_DRV_INTR_LSC,
        },
        .dev_private_size = sizeof(struct priv)
};
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
        /*
         * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
         * huge pages. Calling ibv_fork_init() during init allows
         * applications to use fork() safely for purposes other than
         * using this PMD, which is not supported in forked processes.
         */
        setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
        ibv_fork_init();
        rte_eal_pci_register(&mlx5_driver.pci_drv);
}
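
/*
 * Note added for clarity (not part of the original file): RTE_INIT() turns
 * rte_mlx5_pmd_init() into a constructor, so the registration above runs
 * automatically when the PMD is linked into (or loaded by) an application;
 * no explicit initialization call is needed before rte_eal_init() probes the
 * devices listed in mlx5_pci_id_map.
 */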
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);