/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
20 #include "bnxt_ring.h"
23 #include "bnxt_stats.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29 #include "bnxt_util.h"
/* Driver identification string exported in logs and ethtool-style queries. */
#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
/* Log type id registered for this PMD's driver-level messages. */
int bnxt_logtype_driver;
/* PCI vendor id for Broadcom and device ids of all NetXtreme-C/E and
 * Stratus adapters handled by this PMD. Values are raw PCI device ids.
 */
#define PCI_VENDOR_ID_BROADCOM		0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF1	0x1606
#define BROADCOM_DEV_ID_STRATUS_NIC_VF2	0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC	0x1614
#define BROADCOM_DEV_ID_57414_VF	0x16c1
#define BROADCOM_DEV_ID_57301		0x16c8
#define BROADCOM_DEV_ID_57302		0x16c9
#define BROADCOM_DEV_ID_57304_PF	0x16ca
#define BROADCOM_DEV_ID_57304_VF	0x16cb
#define BROADCOM_DEV_ID_57417_MF	0x16cc
#define BROADCOM_DEV_ID_NS2		0x16cd
#define BROADCOM_DEV_ID_57311		0x16ce
#define BROADCOM_DEV_ID_57312		0x16cf
#define BROADCOM_DEV_ID_57402		0x16d0
#define BROADCOM_DEV_ID_57404		0x16d1
#define BROADCOM_DEV_ID_57406_PF	0x16d2
#define BROADCOM_DEV_ID_57406_VF	0x16d3
#define BROADCOM_DEV_ID_57402_MF	0x16d4
#define BROADCOM_DEV_ID_57407_RJ45	0x16d5
#define BROADCOM_DEV_ID_57412		0x16d6
#define BROADCOM_DEV_ID_57414		0x16d7
#define BROADCOM_DEV_ID_57416_RJ45	0x16d8
#define BROADCOM_DEV_ID_57417_RJ45	0x16d9
#define BROADCOM_DEV_ID_5741X_VF	0x16dc
#define BROADCOM_DEV_ID_57412_MF	0x16de
#define BROADCOM_DEV_ID_57314		0x16df
#define BROADCOM_DEV_ID_57317_RJ45	0x16e0
#define BROADCOM_DEV_ID_5731X_VF	0x16e1
#define BROADCOM_DEV_ID_57417_SFP	0x16e2
#define BROADCOM_DEV_ID_57416_SFP	0x16e3
#define BROADCOM_DEV_ID_57317_SFP	0x16e4
#define BROADCOM_DEV_ID_57404_MF	0x16e7
#define BROADCOM_DEV_ID_57406_MF	0x16e8
#define BROADCOM_DEV_ID_57407_SFP	0x16e9
#define BROADCOM_DEV_ID_57407_MF	0x16ea
#define BROADCOM_DEV_ID_57414_MF	0x16ec
#define BROADCOM_DEV_ID_57416_MF	0x16ee
#define BROADCOM_DEV_ID_58802		0xd802
#define BROADCOM_DEV_ID_58804		0xd804
#define BROADCOM_DEV_ID_58808		0x16f0
#define BROADCOM_DEV_ID_58802_VF	0xd800
79 static const struct rte_pci_id bnxt_pci_id_map
[] = {
80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
,
81 BROADCOM_DEV_ID_STRATUS_NIC_VF1
) },
82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
,
83 BROADCOM_DEV_ID_STRATUS_NIC_VF2
) },
84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_STRATUS_NIC
) },
85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57414_VF
) },
86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57301
) },
87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57302
) },
88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57304_PF
) },
89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57304_VF
) },
90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_NS2
) },
91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57402
) },
92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57404
) },
93 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57406_PF
) },
94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57406_VF
) },
95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57402_MF
) },
96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57407_RJ45
) },
97 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57404_MF
) },
98 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57406_MF
) },
99 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57407_SFP
) },
100 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57407_MF
) },
101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_5741X_VF
) },
102 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_5731X_VF
) },
103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57314
) },
104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57417_MF
) },
105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57311
) },
106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57312
) },
107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57412
) },
108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57414
) },
109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57416_RJ45
) },
110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57417_RJ45
) },
111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57412_MF
) },
112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57317_RJ45
) },
113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57417_SFP
) },
114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57416_SFP
) },
115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57317_SFP
) },
116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57414_MF
) },
117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_57416_MF
) },
118 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_58802
) },
119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_58804
) },
120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_58808
) },
121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, BROADCOM_DEV_ID_58802_VF
) },
122 { .vendor_id
= 0, /* sentinel */ },
/* RSS hash types advertised to applications.
 * NOTE(review): the ETH_RSS_IPV4 / ETH_RSS_IPV6 terms were on lines dropped
 * by the extraction; restored from the upstream bnxt driver — confirm.
 */
#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)
/* Tx offload capabilities reported in dev_info. */
#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_CKSUM | \
				     DEV_TX_OFFLOAD_UDP_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_TSO | \
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
				     DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx offload capabilities reported in dev_info. */
#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
				     DEV_RX_OFFLOAD_VLAN_STRIP | \
				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_UDP_CKSUM | \
				     DEV_RX_OFFLOAD_TCP_CKSUM | \
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
				     DEV_RX_OFFLOAD_KEEP_CRC | \
				     DEV_RX_OFFLOAD_TCP_LRO)
155 static int bnxt_vlan_offload_set_op(struct rte_eth_dev
*dev
, int mask
);
156 static void bnxt_print_link_info(struct rte_eth_dev
*eth_dev
);
157 static int bnxt_mtu_set_op(struct rte_eth_dev
*eth_dev
, uint16_t new_mtu
);
158 static int bnxt_dev_uninit(struct rte_eth_dev
*eth_dev
);
/***********************/

/*
 * High level utility functions
 */
/* Release all software memory owned by the port: filters, VNIC attribute
 * tables, VNIC descriptors and the Tx/Rx ring structures.
 * NOTE(review): the extraction dropped some lines here; upstream also frees
 * statistics contexts between the VNIC and ring teardown — confirm.
 */
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
}
/* Allocate the software state needed before HWRM resources can be created:
 * VNIC descriptors, VNIC attribute tables and filter memory.
 * On any failure, everything allocated so far is released via bnxt_free_mem()
 * and the first error code is returned; returns 0 on success.
 */
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}
200 static int bnxt_init_chip(struct bnxt
*bp
)
202 struct bnxt_rx_queue
*rxq
;
203 struct rte_eth_link
new;
204 struct rte_pci_device
*pci_dev
= RTE_ETH_DEV_TO_PCI(bp
->eth_dev
);
205 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
206 struct rte_intr_handle
*intr_handle
= &pci_dev
->intr_handle
;
207 uint64_t rx_offloads
= dev_conf
->rxmode
.offloads
;
208 uint32_t intr_vector
= 0;
209 uint32_t queue_id
, base
= BNXT_MISC_VEC_ID
;
210 uint32_t vec
= BNXT_MISC_VEC_ID
;
214 /* disable uio/vfio intr/eventfd mapping */
215 rte_intr_disable(intr_handle
);
217 if (bp
->eth_dev
->data
->mtu
> ETHER_MTU
) {
218 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
|=
219 DEV_RX_OFFLOAD_JUMBO_FRAME
;
220 bp
->flags
|= BNXT_FLAG_JUMBO
;
222 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
&=
223 ~DEV_RX_OFFLOAD_JUMBO_FRAME
;
224 bp
->flags
&= ~BNXT_FLAG_JUMBO
;
227 rc
= bnxt_alloc_all_hwrm_stat_ctxs(bp
);
229 PMD_DRV_LOG(ERR
, "HWRM stat ctx alloc failure rc: %x\n", rc
);
233 rc
= bnxt_alloc_hwrm_rings(bp
);
235 PMD_DRV_LOG(ERR
, "HWRM ring alloc failure rc: %x\n", rc
);
239 rc
= bnxt_alloc_all_hwrm_ring_grps(bp
);
241 PMD_DRV_LOG(ERR
, "HWRM ring grp alloc failure: %x\n", rc
);
245 rc
= bnxt_mq_rx_configure(bp
);
247 PMD_DRV_LOG(ERR
, "MQ mode configure failure rc: %x\n", rc
);
251 /* VNIC configuration */
252 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
253 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
254 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
255 uint32_t size
= sizeof(*vnic
->fw_grp_ids
) * bp
->max_ring_grps
;
257 vnic
->fw_grp_ids
= rte_zmalloc("vnic_fw_grp_ids", size
, 0);
258 if (!vnic
->fw_grp_ids
) {
260 "Failed to alloc %d bytes for group ids\n",
265 memset(vnic
->fw_grp_ids
, -1, size
);
267 PMD_DRV_LOG(DEBUG
, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
268 i
, vnic
, vnic
->fw_grp_ids
);
270 rc
= bnxt_hwrm_vnic_alloc(bp
, vnic
);
272 PMD_DRV_LOG(ERR
, "HWRM vnic %d alloc failure rc: %x\n",
277 /* Alloc RSS context only if RSS mode is enabled */
278 if (dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_RSS
) {
279 rc
= bnxt_hwrm_vnic_ctx_alloc(bp
, vnic
);
282 "HWRM vnic %d ctx alloc failure rc: %x\n",
289 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
290 * setting is not available at this time, it will not be
291 * configured correctly in the CFA.
293 if (rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
)
294 vnic
->vlan_strip
= true;
296 vnic
->vlan_strip
= false;
298 rc
= bnxt_hwrm_vnic_cfg(bp
, vnic
);
300 PMD_DRV_LOG(ERR
, "HWRM vnic %d cfg failure rc: %x\n",
305 rc
= bnxt_set_hwrm_vnic_filters(bp
, vnic
);
308 "HWRM vnic %d filter failure rc: %x\n",
313 for (j
= 0; j
< bp
->rx_nr_rings
; j
++) {
314 rxq
= bp
->eth_dev
->data
->rx_queues
[j
];
317 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
318 j
, rxq
->vnic
, rxq
->vnic
->fw_grp_ids
);
320 if (rxq
->rx_deferred_start
)
321 rxq
->vnic
->fw_grp_ids
[j
] = INVALID_HW_RING_ID
;
324 rc
= bnxt_vnic_rss_configure(bp
, vnic
);
327 "HWRM vnic set RSS failure rc: %x\n", rc
);
331 bnxt_hwrm_vnic_plcmode_cfg(bp
, vnic
);
333 if (bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
&
334 DEV_RX_OFFLOAD_TCP_LRO
)
335 bnxt_hwrm_vnic_tpa_cfg(bp
, vnic
, 1);
337 bnxt_hwrm_vnic_tpa_cfg(bp
, vnic
, 0);
339 rc
= bnxt_hwrm_cfa_l2_set_rx_mask(bp
, &bp
->vnic_info
[0], 0, NULL
);
342 "HWRM cfa l2 rx mask failure rc: %x\n", rc
);
346 /* check and configure queue intr-vector mapping */
347 if ((rte_intr_cap_multiple(intr_handle
) ||
348 !RTE_ETH_DEV_SRIOV(bp
->eth_dev
).active
) &&
349 bp
->eth_dev
->data
->dev_conf
.intr_conf
.rxq
!= 0) {
350 intr_vector
= bp
->eth_dev
->data
->nb_rx_queues
;
351 PMD_DRV_LOG(DEBUG
, "intr_vector = %d\n", intr_vector
);
352 if (intr_vector
> bp
->rx_cp_nr_rings
) {
353 PMD_DRV_LOG(ERR
, "At most %d intr queues supported",
357 if (rte_intr_efd_enable(intr_handle
, intr_vector
))
361 if (rte_intr_dp_is_en(intr_handle
) && !intr_handle
->intr_vec
) {
362 intr_handle
->intr_vec
=
363 rte_zmalloc("intr_vec",
364 bp
->eth_dev
->data
->nb_rx_queues
*
366 if (intr_handle
->intr_vec
== NULL
) {
367 PMD_DRV_LOG(ERR
, "Failed to allocate %d rx_queues"
368 " intr_vec", bp
->eth_dev
->data
->nb_rx_queues
);
371 PMD_DRV_LOG(DEBUG
, "intr_handle->intr_vec = %p "
372 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
373 intr_handle
->intr_vec
, intr_handle
->nb_efd
,
374 intr_handle
->max_intr
);
377 for (queue_id
= 0; queue_id
< bp
->eth_dev
->data
->nb_rx_queues
;
379 intr_handle
->intr_vec
[queue_id
] = vec
;
380 if (vec
< base
+ intr_handle
->nb_efd
- 1)
384 /* enable uio/vfio intr/eventfd mapping */
385 rte_intr_enable(intr_handle
);
387 rc
= bnxt_get_hwrm_link_config(bp
, &new);
389 PMD_DRV_LOG(ERR
, "HWRM Get link config failure rc: %x\n", rc
);
393 if (!bp
->link_info
.link_up
) {
394 rc
= bnxt_set_hwrm_link_config(bp
, true);
397 "HWRM link config failure rc: %x\n", rc
);
401 bnxt_print_link_info(bp
->eth_dev
);
406 bnxt_free_all_hwrm_resources(bp
);
408 /* Some of the error status returned by FW may not be from errno.h */
/* Tear down the data path created by bnxt_init_chip(): release all HWRM
 * resources, then free the software filter and VNIC state. Always returns 0.
 */
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}
/* One-time software initialization of ring groups, VNICs and filter lists.
 * Returns 0 on success or the ring-group allocation error.
 * NOTE(review): bnxt_init_vnics() restored from upstream — the extraction
 * dropped that line; confirm against the original source.
 */
static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	rc = bnxt_init_ring_grps(bp);
	if (rc)
		return rc;

	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	return 0;
}
/*
 * Device configuration and status function
 */
441 static void bnxt_dev_info_get_op(struct rte_eth_dev
*eth_dev
,
442 struct rte_eth_dev_info
*dev_info
)
444 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
445 uint16_t max_vnics
, i
, j
, vpool
, vrxq
;
446 unsigned int max_rx_rings
;
449 dev_info
->max_mac_addrs
= bp
->max_l2_ctx
;
450 dev_info
->max_hash_mac_addrs
= 0;
452 /* PF/VF specifics */
454 dev_info
->max_vfs
= bp
->pdev
->max_vfs
;
455 max_rx_rings
= RTE_MIN(bp
->max_vnics
, bp
->max_stat_ctx
);
456 /* For the sake of symmetry, max_rx_queues = max_tx_queues */
457 dev_info
->max_rx_queues
= max_rx_rings
;
458 dev_info
->max_tx_queues
= max_rx_rings
;
459 dev_info
->reta_size
= HW_HASH_INDEX_SIZE
;
460 dev_info
->hash_key_size
= 40;
461 max_vnics
= bp
->max_vnics
;
463 /* Fast path specifics */
464 dev_info
->min_rx_bufsize
= 1;
465 dev_info
->max_rx_pktlen
= BNXT_MAX_MTU
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
468 dev_info
->rx_offload_capa
= BNXT_DEV_RX_OFFLOAD_SUPPORT
;
469 if (bp
->flags
& BNXT_FLAG_PTP_SUPPORTED
)
470 dev_info
->rx_offload_capa
|= DEV_RX_OFFLOAD_TIMESTAMP
;
471 dev_info
->tx_offload_capa
= BNXT_DEV_TX_OFFLOAD_SUPPORT
;
472 dev_info
->flow_type_rss_offloads
= BNXT_ETH_RSS_SUPPORT
;
475 dev_info
->default_rxconf
= (struct rte_eth_rxconf
) {
481 .rx_free_thresh
= 32,
482 /* If no descriptors available, pkts are dropped by default */
486 dev_info
->default_txconf
= (struct rte_eth_txconf
) {
492 .tx_free_thresh
= 32,
495 eth_dev
->data
->dev_conf
.intr_conf
.lsc
= 1;
497 eth_dev
->data
->dev_conf
.intr_conf
.rxq
= 1;
498 dev_info
->rx_desc_lim
.nb_min
= BNXT_MIN_RING_DESC
;
499 dev_info
->rx_desc_lim
.nb_max
= BNXT_MAX_RX_RING_DESC
;
500 dev_info
->tx_desc_lim
.nb_min
= BNXT_MIN_RING_DESC
;
501 dev_info
->tx_desc_lim
.nb_max
= BNXT_MAX_TX_RING_DESC
;
506 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
507 * need further investigation.
511 vpool
= 64; /* ETH_64_POOLS */
512 vrxq
= 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
513 for (i
= 0; i
< 4; vpool
>>= 1, i
++) {
514 if (max_vnics
> vpool
) {
515 for (j
= 0; j
< 5; vrxq
>>= 1, j
++) {
516 if (dev_info
->max_rx_queues
> vrxq
) {
522 /* Not enough resources to support VMDq */
526 /* Not enough resources to support VMDq */
530 dev_info
->max_vmdq_pools
= vpool
;
531 dev_info
->vmdq_queue_num
= vrxq
;
533 dev_info
->vmdq_pool_base
= 0;
534 dev_info
->vmdq_queue_base
= 0;
537 /* Configure the device based on the configuration provided */
538 static int bnxt_dev_configure_op(struct rte_eth_dev
*eth_dev
)
540 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
541 uint64_t rx_offloads
= eth_dev
->data
->dev_conf
.rxmode
.offloads
;
544 bp
->rx_queues
= (void *)eth_dev
->data
->rx_queues
;
545 bp
->tx_queues
= (void *)eth_dev
->data
->tx_queues
;
546 bp
->tx_nr_rings
= eth_dev
->data
->nb_tx_queues
;
547 bp
->rx_nr_rings
= eth_dev
->data
->nb_rx_queues
;
549 if (BNXT_VF(bp
) && (bp
->flags
& BNXT_FLAG_NEW_RM
)) {
550 rc
= bnxt_hwrm_check_vf_rings(bp
);
552 PMD_DRV_LOG(ERR
, "HWRM insufficient resources\n");
556 rc
= bnxt_hwrm_func_reserve_vf_resc(bp
, false);
558 PMD_DRV_LOG(ERR
, "HWRM resource alloc fail:%x\n", rc
);
562 /* legacy driver needs to get updated values */
563 rc
= bnxt_hwrm_func_qcaps(bp
);
565 PMD_DRV_LOG(ERR
, "hwrm func qcaps fail:%d\n", rc
);
570 /* Inherit new configurations */
571 if (eth_dev
->data
->nb_rx_queues
> bp
->max_rx_rings
||
572 eth_dev
->data
->nb_tx_queues
> bp
->max_tx_rings
||
573 eth_dev
->data
->nb_rx_queues
+ eth_dev
->data
->nb_tx_queues
>
575 eth_dev
->data
->nb_rx_queues
+ eth_dev
->data
->nb_tx_queues
>
577 (uint32_t)(eth_dev
->data
->nb_rx_queues
) > bp
->max_ring_grps
||
578 (!(eth_dev
->data
->dev_conf
.rxmode
.mq_mode
& ETH_MQ_RX_RSS
) &&
579 bp
->max_vnics
< eth_dev
->data
->nb_rx_queues
)) {
581 "Insufficient resources to support requested config\n");
583 "Num Queues Requested: Tx %d, Rx %d\n",
584 eth_dev
->data
->nb_tx_queues
,
585 eth_dev
->data
->nb_rx_queues
);
587 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
588 bp
->max_tx_rings
, bp
->max_rx_rings
, bp
->max_cp_rings
,
589 bp
->max_stat_ctx
, bp
->max_ring_grps
, bp
->max_vnics
);
593 bp
->rx_cp_nr_rings
= bp
->rx_nr_rings
;
594 bp
->tx_cp_nr_rings
= bp
->tx_nr_rings
;
596 if (rx_offloads
& DEV_RX_OFFLOAD_JUMBO_FRAME
) {
598 eth_dev
->data
->dev_conf
.rxmode
.max_rx_pkt_len
-
599 ETHER_HDR_LEN
- ETHER_CRC_LEN
- VLAN_TAG_SIZE
*
601 bnxt_mtu_set_op(eth_dev
, eth_dev
->data
->mtu
);
606 static void bnxt_print_link_info(struct rte_eth_dev
*eth_dev
)
608 struct rte_eth_link
*link
= ð_dev
->data
->dev_link
;
610 if (link
->link_status
)
611 PMD_DRV_LOG(INFO
, "Port %d Link Up - speed %u Mbps - %s\n",
612 eth_dev
->data
->port_id
,
613 (uint32_t)link
->link_speed
,
614 (link
->link_duplex
== ETH_LINK_FULL_DUPLEX
) ?
615 ("full-duplex") : ("half-duplex\n"));
617 PMD_DRV_LOG(INFO
, "Port %d Link Down\n",
618 eth_dev
->data
->port_id
);
/* Link-state-change interrupt setup hook: just logs the current link state.
 * Always returns 0.
 */
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
627 static int bnxt_dev_start_op(struct rte_eth_dev
*eth_dev
)
629 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
630 uint64_t rx_offloads
= eth_dev
->data
->dev_conf
.rxmode
.offloads
;
634 if (bp
->rx_cp_nr_rings
> RTE_ETHDEV_QUEUE_STAT_CNTRS
) {
636 "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
637 bp
->rx_cp_nr_rings
, RTE_ETHDEV_QUEUE_STAT_CNTRS
);
641 rc
= bnxt_init_chip(bp
);
645 bnxt_link_update_op(eth_dev
, 1);
647 if (rx_offloads
& DEV_RX_OFFLOAD_VLAN_FILTER
)
648 vlan_mask
|= ETH_VLAN_FILTER_MASK
;
649 if (rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
)
650 vlan_mask
|= ETH_VLAN_STRIP_MASK
;
651 rc
= bnxt_vlan_offload_set_op(eth_dev
, vlan_mask
);
655 bp
->flags
|= BNXT_FLAG_INIT_DONE
;
659 bnxt_shutdown_nic(bp
);
660 bnxt_free_tx_mbufs(bp
);
661 bnxt_free_rx_mbufs(bp
);
665 static int bnxt_dev_set_link_up_op(struct rte_eth_dev
*eth_dev
)
667 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
670 if (!bp
->link_info
.link_up
)
671 rc
= bnxt_set_hwrm_link_config(bp
, true);
673 eth_dev
->data
->dev_link
.link_status
= 1;
675 bnxt_print_link_info(eth_dev
);
679 static int bnxt_dev_set_link_down_op(struct rte_eth_dev
*eth_dev
)
681 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
683 eth_dev
->data
->dev_link
.link_status
= 0;
684 bnxt_set_hwrm_link_config(bp
, false);
685 bp
->link_info
.link_up
= 0;
690 /* Unload the driver, release resources */
691 static void bnxt_dev_stop_op(struct rte_eth_dev
*eth_dev
)
693 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
695 bp
->flags
&= ~BNXT_FLAG_INIT_DONE
;
696 if (bp
->eth_dev
->data
->dev_started
) {
697 /* TBD: STOP HW queues DMA */
698 eth_dev
->data
->dev_link
.link_status
= 0;
700 bnxt_set_hwrm_link_config(bp
, false);
701 bnxt_hwrm_port_clr_stats(bp
);
702 bnxt_free_tx_mbufs(bp
);
703 bnxt_free_rx_mbufs(bp
);
704 bnxt_shutdown_nic(bp
);
708 static void bnxt_dev_close_op(struct rte_eth_dev
*eth_dev
)
710 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
712 if (bp
->dev_stopped
== 0)
713 bnxt_dev_stop_op(eth_dev
);
715 if (eth_dev
->data
->mac_addrs
!= NULL
) {
716 rte_free(eth_dev
->data
->mac_addrs
);
717 eth_dev
->data
->mac_addrs
= NULL
;
719 if (bp
->grp_info
!= NULL
) {
720 rte_free(bp
->grp_info
);
724 bnxt_dev_uninit(eth_dev
);
727 static void bnxt_mac_addr_remove_op(struct rte_eth_dev
*eth_dev
,
730 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
731 uint64_t pool_mask
= eth_dev
->data
->mac_pool_sel
[index
];
732 struct bnxt_vnic_info
*vnic
;
733 struct bnxt_filter_info
*filter
, *temp_filter
;
737 * Loop through all VNICs from the specified filter flow pools to
738 * remove the corresponding MAC addr filter
740 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
741 if (!(pool_mask
& (1ULL << i
)))
744 vnic
= &bp
->vnic_info
[i
];
745 filter
= STAILQ_FIRST(&vnic
->filter
);
747 temp_filter
= STAILQ_NEXT(filter
, next
);
748 if (filter
->mac_index
== index
) {
749 STAILQ_REMOVE(&vnic
->filter
, filter
,
750 bnxt_filter_info
, next
);
751 bnxt_hwrm_clear_l2_filter(bp
, filter
);
752 filter
->mac_index
= INVALID_MAC_INDEX
;
753 memset(&filter
->l2_addr
, 0, ETHER_ADDR_LEN
);
754 STAILQ_INSERT_TAIL(&bp
->free_filter_list
,
757 filter
= temp_filter
;
762 static int bnxt_mac_addr_add_op(struct rte_eth_dev
*eth_dev
,
763 struct ether_addr
*mac_addr
,
764 uint32_t index
, uint32_t pool
)
766 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
767 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[pool
];
768 struct bnxt_filter_info
*filter
;
770 if (BNXT_VF(bp
) & !BNXT_VF_IS_TRUSTED(bp
)) {
771 PMD_DRV_LOG(ERR
, "Cannot add MAC address to a VF interface\n");
776 PMD_DRV_LOG(ERR
, "VNIC not found for pool %d!\n", pool
);
779 /* Attach requested MAC address to the new l2_filter */
780 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
781 if (filter
->mac_index
== index
) {
783 "MAC addr already existed for pool %d\n", pool
);
787 filter
= bnxt_alloc_filter(bp
);
789 PMD_DRV_LOG(ERR
, "L2 filter alloc failed\n");
792 STAILQ_INSERT_TAIL(&vnic
->filter
, filter
, next
);
793 filter
->mac_index
= index
;
794 memcpy(filter
->l2_addr
, mac_addr
, ETHER_ADDR_LEN
);
795 return bnxt_hwrm_set_l2_filter(bp
, vnic
->fw_vnic_id
, filter
);
798 int bnxt_link_update_op(struct rte_eth_dev
*eth_dev
, int wait_to_complete
)
801 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
802 struct rte_eth_link
new;
803 unsigned int cnt
= BNXT_LINK_WAIT_CNT
;
805 memset(&new, 0, sizeof(new));
807 /* Retrieve link info from hardware */
808 rc
= bnxt_get_hwrm_link_config(bp
, &new);
810 new.link_speed
= ETH_LINK_SPEED_100M
;
811 new.link_duplex
= ETH_LINK_FULL_DUPLEX
;
813 "Failed to retrieve link rc = 0x%x!\n", rc
);
816 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL
);
818 if (!wait_to_complete
)
820 } while (!new.link_status
&& cnt
--);
823 /* Timed out or success */
824 if (new.link_status
!= eth_dev
->data
->dev_link
.link_status
||
825 new.link_speed
!= eth_dev
->data
->dev_link
.link_speed
) {
826 memcpy(ð_dev
->data
->dev_link
, &new,
827 sizeof(struct rte_eth_link
));
829 _rte_eth_dev_callback_process(eth_dev
,
830 RTE_ETH_EVENT_INTR_LSC
,
833 bnxt_print_link_info(eth_dev
);
839 static void bnxt_promiscuous_enable_op(struct rte_eth_dev
*eth_dev
)
841 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
842 struct bnxt_vnic_info
*vnic
;
844 if (bp
->vnic_info
== NULL
)
847 vnic
= &bp
->vnic_info
[0];
849 vnic
->flags
|= BNXT_VNIC_INFO_PROMISC
;
850 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
853 static void bnxt_promiscuous_disable_op(struct rte_eth_dev
*eth_dev
)
855 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
856 struct bnxt_vnic_info
*vnic
;
858 if (bp
->vnic_info
== NULL
)
861 vnic
= &bp
->vnic_info
[0];
863 vnic
->flags
&= ~BNXT_VNIC_INFO_PROMISC
;
864 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
867 static void bnxt_allmulticast_enable_op(struct rte_eth_dev
*eth_dev
)
869 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
870 struct bnxt_vnic_info
*vnic
;
872 if (bp
->vnic_info
== NULL
)
875 vnic
= &bp
->vnic_info
[0];
877 vnic
->flags
|= BNXT_VNIC_INFO_ALLMULTI
;
878 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
881 static void bnxt_allmulticast_disable_op(struct rte_eth_dev
*eth_dev
)
883 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
884 struct bnxt_vnic_info
*vnic
;
886 if (bp
->vnic_info
== NULL
)
889 vnic
= &bp
->vnic_info
[0];
891 vnic
->flags
&= ~BNXT_VNIC_INFO_ALLMULTI
;
892 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
895 static int bnxt_reta_update_op(struct rte_eth_dev
*eth_dev
,
896 struct rte_eth_rss_reta_entry64
*reta_conf
,
899 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
900 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
901 struct bnxt_vnic_info
*vnic
;
904 if (!(dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_RSS_FLAG
))
907 if (reta_size
!= HW_HASH_INDEX_SIZE
) {
908 PMD_DRV_LOG(ERR
, "The configured hash table lookup size "
909 "(%d) must equal the size supported by the hardware "
910 "(%d)\n", reta_size
, HW_HASH_INDEX_SIZE
);
913 /* Update the RSS VNIC(s) */
914 for (i
= 0; i
< bp
->max_vnics
; i
++) {
915 vnic
= &bp
->vnic_info
[i
];
916 memcpy(vnic
->rss_table
, reta_conf
, reta_size
);
917 bnxt_hwrm_vnic_rss_cfg(bp
, vnic
);
922 static int bnxt_reta_query_op(struct rte_eth_dev
*eth_dev
,
923 struct rte_eth_rss_reta_entry64
*reta_conf
,
926 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
927 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
928 struct rte_intr_handle
*intr_handle
929 = &bp
->pdev
->intr_handle
;
931 /* Retrieve from the default VNIC */
934 if (!vnic
->rss_table
)
937 if (reta_size
!= HW_HASH_INDEX_SIZE
) {
938 PMD_DRV_LOG(ERR
, "The configured hash table lookup size "
939 "(%d) must equal the size supported by the hardware "
940 "(%d)\n", reta_size
, HW_HASH_INDEX_SIZE
);
943 /* EW - need to revisit here copying from uint64_t to uint16_t */
944 memcpy(reta_conf
, vnic
->rss_table
, reta_size
);
946 if (rte_intr_allow_others(intr_handle
)) {
947 if (eth_dev
->data
->dev_conf
.intr_conf
.lsc
!= 0)
948 bnxt_dev_lsc_intr_setup(eth_dev
);
954 static int bnxt_rss_hash_update_op(struct rte_eth_dev
*eth_dev
,
955 struct rte_eth_rss_conf
*rss_conf
)
957 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
958 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
959 struct bnxt_vnic_info
*vnic
;
960 uint16_t hash_type
= 0;
964 * If RSS enablement were different than dev_configure,
965 * then return -EINVAL
967 if (dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_RSS_FLAG
) {
968 if (!rss_conf
->rss_hf
)
969 PMD_DRV_LOG(ERR
, "Hash type NONE\n");
971 if (rss_conf
->rss_hf
& BNXT_ETH_RSS_SUPPORT
)
975 bp
->flags
|= BNXT_FLAG_UPDATE_HASH
;
976 memcpy(&bp
->rss_conf
, rss_conf
, sizeof(*rss_conf
));
978 if (rss_conf
->rss_hf
& ETH_RSS_IPV4
)
979 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
;
980 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV4_TCP
)
981 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
;
982 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV4_UDP
)
983 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
;
984 if (rss_conf
->rss_hf
& ETH_RSS_IPV6
)
985 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
;
986 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV6_TCP
)
987 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
;
988 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV6_UDP
)
989 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
;
991 /* Update the RSS VNIC(s) */
992 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
993 vnic
= &bp
->vnic_info
[i
];
994 vnic
->hash_type
= hash_type
;
997 * Use the supplied key if the key length is
998 * acceptable and the rss_key is not NULL
1000 if (rss_conf
->rss_key
&&
1001 rss_conf
->rss_key_len
<= HW_HASH_KEY_SIZE
)
1002 memcpy(vnic
->rss_hash_key
, rss_conf
->rss_key
,
1003 rss_conf
->rss_key_len
);
1005 bnxt_hwrm_vnic_rss_cfg(bp
, vnic
);
1010 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev
*eth_dev
,
1011 struct rte_eth_rss_conf
*rss_conf
)
1013 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1014 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
1016 uint32_t hash_types
;
1018 /* RSS configuration is the same for all VNICs */
1019 if (vnic
&& vnic
->rss_hash_key
) {
1020 if (rss_conf
->rss_key
) {
1021 len
= rss_conf
->rss_key_len
<= HW_HASH_KEY_SIZE
?
1022 rss_conf
->rss_key_len
: HW_HASH_KEY_SIZE
;
1023 memcpy(rss_conf
->rss_key
, vnic
->rss_hash_key
, len
);
1026 hash_types
= vnic
->hash_type
;
1027 rss_conf
->rss_hf
= 0;
1028 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
) {
1029 rss_conf
->rss_hf
|= ETH_RSS_IPV4
;
1030 hash_types
&= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
;
1032 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
) {
1033 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV4_TCP
;
1035 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
;
1037 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
) {
1038 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV4_UDP
;
1040 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
;
1042 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
) {
1043 rss_conf
->rss_hf
|= ETH_RSS_IPV6
;
1044 hash_types
&= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
;
1046 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
) {
1047 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV6_TCP
;
1049 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
;
1051 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
) {
1052 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV6_UDP
;
1054 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
;
1058 "Unknwon RSS config from firmware (%08x), RSS disabled",
1063 rss_conf
->rss_hf
= 0;
1068 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev
*dev
,
1069 struct rte_eth_fc_conf
*fc_conf
)
1071 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1072 struct rte_eth_link link_info
;
1075 rc
= bnxt_get_hwrm_link_config(bp
, &link_info
);
1079 memset(fc_conf
, 0, sizeof(*fc_conf
));
1080 if (bp
->link_info
.auto_pause
)
1081 fc_conf
->autoneg
= 1;
1082 switch (bp
->link_info
.pause
) {
1084 fc_conf
->mode
= RTE_FC_NONE
;
1086 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX
:
1087 fc_conf
->mode
= RTE_FC_TX_PAUSE
;
1089 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX
:
1090 fc_conf
->mode
= RTE_FC_RX_PAUSE
;
1092 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX
|
1093 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX
):
1094 fc_conf
->mode
= RTE_FC_FULL
;
1100 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev
*dev
,
1101 struct rte_eth_fc_conf
*fc_conf
)
1103 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1105 if (!BNXT_SINGLE_PF(bp
) || BNXT_VF(bp
)) {
1106 PMD_DRV_LOG(ERR
, "Flow Control Settings cannot be modified\n");
1110 switch (fc_conf
->mode
) {
1112 bp
->link_info
.auto_pause
= 0;
1113 bp
->link_info
.force_pause
= 0;
1115 case RTE_FC_RX_PAUSE
:
1116 if (fc_conf
->autoneg
) {
1117 bp
->link_info
.auto_pause
=
1118 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX
;
1119 bp
->link_info
.force_pause
= 0;
1121 bp
->link_info
.auto_pause
= 0;
1122 bp
->link_info
.force_pause
=
1123 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX
;
1126 case RTE_FC_TX_PAUSE
:
1127 if (fc_conf
->autoneg
) {
1128 bp
->link_info
.auto_pause
=
1129 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX
;
1130 bp
->link_info
.force_pause
= 0;
1132 bp
->link_info
.auto_pause
= 0;
1133 bp
->link_info
.force_pause
=
1134 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX
;
1138 if (fc_conf
->autoneg
) {
1139 bp
->link_info
.auto_pause
=
1140 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX
|
1141 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX
;
1142 bp
->link_info
.force_pause
= 0;
1144 bp
->link_info
.auto_pause
= 0;
1145 bp
->link_info
.force_pause
=
1146 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX
|
1147 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX
;
1151 return bnxt_set_hwrm_link_config(bp
, true);
1154 /* Add UDP tunneling port */
1156 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev
*eth_dev
,
1157 struct rte_eth_udp_tunnel
*udp_tunnel
)
1159 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1160 uint16_t tunnel_type
= 0;
1163 switch (udp_tunnel
->prot_type
) {
1164 case RTE_TUNNEL_TYPE_VXLAN
:
1165 if (bp
->vxlan_port_cnt
) {
1166 PMD_DRV_LOG(ERR
, "Tunnel Port %d already programmed\n",
1167 udp_tunnel
->udp_port
);
1168 if (bp
->vxlan_port
!= udp_tunnel
->udp_port
) {
1169 PMD_DRV_LOG(ERR
, "Only one port allowed\n");
1172 bp
->vxlan_port_cnt
++;
1176 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
;
1177 bp
->vxlan_port_cnt
++;
1179 case RTE_TUNNEL_TYPE_GENEVE
:
1180 if (bp
->geneve_port_cnt
) {
1181 PMD_DRV_LOG(ERR
, "Tunnel Port %d already programmed\n",
1182 udp_tunnel
->udp_port
);
1183 if (bp
->geneve_port
!= udp_tunnel
->udp_port
) {
1184 PMD_DRV_LOG(ERR
, "Only one port allowed\n");
1187 bp
->geneve_port_cnt
++;
1191 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE
;
1192 bp
->geneve_port_cnt
++;
1195 PMD_DRV_LOG(ERR
, "Tunnel type is not supported\n");
1198 rc
= bnxt_hwrm_tunnel_dst_port_alloc(bp
, udp_tunnel
->udp_port
,
1204 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev
*eth_dev
,
1205 struct rte_eth_udp_tunnel
*udp_tunnel
)
1207 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1208 uint16_t tunnel_type
= 0;
1212 switch (udp_tunnel
->prot_type
) {
1213 case RTE_TUNNEL_TYPE_VXLAN
:
1214 if (!bp
->vxlan_port_cnt
) {
1215 PMD_DRV_LOG(ERR
, "No Tunnel port configured yet\n");
1218 if (bp
->vxlan_port
!= udp_tunnel
->udp_port
) {
1219 PMD_DRV_LOG(ERR
, "Req Port: %d. Configured port: %d\n",
1220 udp_tunnel
->udp_port
, bp
->vxlan_port
);
1223 if (--bp
->vxlan_port_cnt
)
1227 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN
;
1228 port
= bp
->vxlan_fw_dst_port_id
;
1230 case RTE_TUNNEL_TYPE_GENEVE
:
1231 if (!bp
->geneve_port_cnt
) {
1232 PMD_DRV_LOG(ERR
, "No Tunnel port configured yet\n");
1235 if (bp
->geneve_port
!= udp_tunnel
->udp_port
) {
1236 PMD_DRV_LOG(ERR
, "Req Port: %d. Configured port: %d\n",
1237 udp_tunnel
->udp_port
, bp
->geneve_port
);
1240 if (--bp
->geneve_port_cnt
)
1244 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE
;
1245 port
= bp
->geneve_fw_dst_port_id
;
1248 PMD_DRV_LOG(ERR
, "Tunnel type is not supported\n");
1252 rc
= bnxt_hwrm_tunnel_dst_port_free(bp
, port
, tunnel_type
);
1255 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN
)
1258 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE
)
1259 bp
->geneve_port
= 0;
1264 static int bnxt_del_vlan_filter(struct bnxt
*bp
, uint16_t vlan_id
)
1266 struct bnxt_filter_info
*filter
, *temp_filter
, *new_filter
;
1267 struct bnxt_vnic_info
*vnic
;
1270 uint32_t chk
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN
;
1272 /* Cycle through all VNICs */
1273 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1275 * For each VNIC and each associated filter(s)
1276 * if VLAN exists && VLAN matches vlan_id
1277 * remove the MAC+VLAN filter
1278 * add a new MAC only filter
1280 * VLAN filter doesn't exist, just skip and continue
1282 vnic
= &bp
->vnic_info
[i
];
1283 filter
= STAILQ_FIRST(&vnic
->filter
);
1285 temp_filter
= STAILQ_NEXT(filter
, next
);
1287 if (filter
->enables
& chk
&&
1288 filter
->l2_ovlan
== vlan_id
) {
1289 /* Must delete the filter */
1290 STAILQ_REMOVE(&vnic
->filter
, filter
,
1291 bnxt_filter_info
, next
);
1292 bnxt_hwrm_clear_l2_filter(bp
, filter
);
1293 STAILQ_INSERT_TAIL(&bp
->free_filter_list
,
1297 * Need to examine to see if the MAC
1298 * filter already existed or not before
1299 * allocating a new one
1302 new_filter
= bnxt_alloc_filter(bp
);
1305 "MAC/VLAN filter alloc failed\n");
1309 STAILQ_INSERT_TAIL(&vnic
->filter
,
1311 /* Inherit MAC from previous filter */
1312 new_filter
->mac_index
=
1314 memcpy(new_filter
->l2_addr
, filter
->l2_addr
,
1316 /* MAC only filter */
1317 rc
= bnxt_hwrm_set_l2_filter(bp
,
1323 "Del Vlan filter for %d\n",
1326 filter
= temp_filter
;
1333 static int bnxt_add_vlan_filter(struct bnxt
*bp
, uint16_t vlan_id
)
1335 struct bnxt_filter_info
*filter
, *temp_filter
, *new_filter
;
1336 struct bnxt_vnic_info
*vnic
;
1339 uint32_t en
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
|
1340 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK
;
1341 uint32_t chk
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
;
1343 /* Cycle through all VNICs */
1344 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1346 * For each VNIC and each associated filter(s)
1348 * if VLAN matches vlan_id
1349 * VLAN filter already exists, just skip and continue
1351 * add a new MAC+VLAN filter
1353 * Remove the old MAC only filter
1354 * Add a new MAC+VLAN filter
1356 vnic
= &bp
->vnic_info
[i
];
1357 filter
= STAILQ_FIRST(&vnic
->filter
);
1359 temp_filter
= STAILQ_NEXT(filter
, next
);
1361 if (filter
->enables
& chk
) {
1362 if (filter
->l2_ivlan
== vlan_id
)
1365 /* Must delete the MAC filter */
1366 STAILQ_REMOVE(&vnic
->filter
, filter
,
1367 bnxt_filter_info
, next
);
1368 bnxt_hwrm_clear_l2_filter(bp
, filter
);
1369 filter
->l2_ovlan
= 0;
1370 STAILQ_INSERT_TAIL(&bp
->free_filter_list
,
1373 new_filter
= bnxt_alloc_filter(bp
);
1376 "MAC/VLAN filter alloc failed\n");
1380 STAILQ_INSERT_TAIL(&vnic
->filter
, new_filter
, next
);
1381 /* Inherit MAC from the previous filter */
1382 new_filter
->mac_index
= filter
->mac_index
;
1383 memcpy(new_filter
->l2_addr
, filter
->l2_addr
,
1385 /* MAC + VLAN ID filter */
1386 new_filter
->l2_ivlan
= vlan_id
;
1387 new_filter
->l2_ivlan_mask
= 0xF000;
1388 new_filter
->enables
|= en
;
1389 rc
= bnxt_hwrm_set_l2_filter(bp
,
1395 "Added Vlan filter for %d\n", vlan_id
);
1397 filter
= temp_filter
;
1404 static int bnxt_vlan_filter_set_op(struct rte_eth_dev
*eth_dev
,
1405 uint16_t vlan_id
, int on
)
1407 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1409 /* These operations apply to ALL existing MAC/VLAN filters */
1411 return bnxt_add_vlan_filter(bp
, vlan_id
);
1413 return bnxt_del_vlan_filter(bp
, vlan_id
);
1417 bnxt_vlan_offload_set_op(struct rte_eth_dev
*dev
, int mask
)
1419 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1420 uint64_t rx_offloads
= dev
->data
->dev_conf
.rxmode
.offloads
;
1423 if (mask
& ETH_VLAN_FILTER_MASK
) {
1424 if (!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_FILTER
)) {
1425 /* Remove any VLAN filters programmed */
1426 for (i
= 0; i
< 4095; i
++)
1427 bnxt_del_vlan_filter(bp
, i
);
1429 PMD_DRV_LOG(DEBUG
, "VLAN Filtering: %d\n",
1430 !!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_FILTER
));
1433 if (mask
& ETH_VLAN_STRIP_MASK
) {
1434 /* Enable or disable VLAN stripping */
1435 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1436 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
1437 if (rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
)
1438 vnic
->vlan_strip
= true;
1440 vnic
->vlan_strip
= false;
1441 bnxt_hwrm_vnic_cfg(bp
, vnic
);
1443 PMD_DRV_LOG(DEBUG
, "VLAN Strip Offload: %d\n",
1444 !!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
));
1447 if (mask
& ETH_VLAN_EXTEND_MASK
)
1448 PMD_DRV_LOG(ERR
, "Extend VLAN Not supported\n");
1454 bnxt_set_default_mac_addr_op(struct rte_eth_dev
*dev
, struct ether_addr
*addr
)
1456 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1457 /* Default Filter is tied to VNIC 0 */
1458 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
1459 struct bnxt_filter_info
*filter
;
1462 if (BNXT_VF(bp
) && !BNXT_VF_IS_TRUSTED(bp
))
1465 memcpy(bp
->mac_addr
, addr
, sizeof(bp
->mac_addr
));
1467 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
1468 /* Default Filter is at Index 0 */
1469 if (filter
->mac_index
!= 0)
1471 rc
= bnxt_hwrm_clear_l2_filter(bp
, filter
);
1474 memcpy(filter
->l2_addr
, bp
->mac_addr
, ETHER_ADDR_LEN
);
1475 memset(filter
->l2_addr_mask
, 0xff, ETHER_ADDR_LEN
);
1476 filter
->flags
|= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
;
1478 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
|
1479 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
;
1480 rc
= bnxt_hwrm_set_l2_filter(bp
, vnic
->fw_vnic_id
, filter
);
1483 filter
->mac_index
= 0;
1484 PMD_DRV_LOG(DEBUG
, "Set MAC addr\n");
1491 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev
*eth_dev
,
1492 struct ether_addr
*mc_addr_set
,
1493 uint32_t nb_mc_addr
)
1495 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1496 char *mc_addr_list
= (char *)mc_addr_set
;
1497 struct bnxt_vnic_info
*vnic
;
1498 uint32_t off
= 0, i
= 0;
1500 vnic
= &bp
->vnic_info
[0];
1502 if (nb_mc_addr
> BNXT_MAX_MC_ADDRS
) {
1503 vnic
->flags
|= BNXT_VNIC_INFO_ALLMULTI
;
1507 /* TODO Check for Duplicate mcast addresses */
1508 vnic
->flags
&= ~BNXT_VNIC_INFO_ALLMULTI
;
1509 for (i
= 0; i
< nb_mc_addr
; i
++) {
1510 memcpy(vnic
->mc_list
+ off
, &mc_addr_list
[i
], ETHER_ADDR_LEN
);
1511 off
+= ETHER_ADDR_LEN
;
1514 vnic
->mc_addr_cnt
= i
;
1517 return bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
1521 bnxt_fw_version_get(struct rte_eth_dev
*dev
, char *fw_version
, size_t fw_size
)
1523 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1524 uint8_t fw_major
= (bp
->fw_ver
>> 24) & 0xff;
1525 uint8_t fw_minor
= (bp
->fw_ver
>> 16) & 0xff;
1526 uint8_t fw_updt
= (bp
->fw_ver
>> 8) & 0xff;
1529 ret
= snprintf(fw_version
, fw_size
, "%d.%d.%d",
1530 fw_major
, fw_minor
, fw_updt
);
1532 ret
+= 1; /* add the size of '\0' */
1533 if (fw_size
< (uint32_t)ret
)
1540 bnxt_rxq_info_get_op(struct rte_eth_dev
*dev
, uint16_t queue_id
,
1541 struct rte_eth_rxq_info
*qinfo
)
1543 struct bnxt_rx_queue
*rxq
;
1545 rxq
= dev
->data
->rx_queues
[queue_id
];
1547 qinfo
->mp
= rxq
->mb_pool
;
1548 qinfo
->scattered_rx
= dev
->data
->scattered_rx
;
1549 qinfo
->nb_desc
= rxq
->nb_rx_desc
;
1551 qinfo
->conf
.rx_free_thresh
= rxq
->rx_free_thresh
;
1552 qinfo
->conf
.rx_drop_en
= 0;
1553 qinfo
->conf
.rx_deferred_start
= 0;
1557 bnxt_txq_info_get_op(struct rte_eth_dev
*dev
, uint16_t queue_id
,
1558 struct rte_eth_txq_info
*qinfo
)
1560 struct bnxt_tx_queue
*txq
;
1562 txq
= dev
->data
->tx_queues
[queue_id
];
1564 qinfo
->nb_desc
= txq
->nb_tx_desc
;
1566 qinfo
->conf
.tx_thresh
.pthresh
= txq
->pthresh
;
1567 qinfo
->conf
.tx_thresh
.hthresh
= txq
->hthresh
;
1568 qinfo
->conf
.tx_thresh
.wthresh
= txq
->wthresh
;
1570 qinfo
->conf
.tx_free_thresh
= txq
->tx_free_thresh
;
1571 qinfo
->conf
.tx_rs_thresh
= 0;
1572 qinfo
->conf
.tx_deferred_start
= txq
->tx_deferred_start
;
1575 static int bnxt_mtu_set_op(struct rte_eth_dev
*eth_dev
, uint16_t new_mtu
)
1577 struct bnxt
*bp
= eth_dev
->data
->dev_private
;
1578 struct rte_eth_dev_info dev_info
;
1582 bnxt_dev_info_get_op(eth_dev
, &dev_info
);
1584 if (new_mtu
< ETHER_MIN_MTU
|| new_mtu
> BNXT_MAX_MTU
) {
1585 PMD_DRV_LOG(ERR
, "MTU requested must be within (%d, %d)\n",
1586 ETHER_MIN_MTU
, BNXT_MAX_MTU
);
1590 if (new_mtu
> ETHER_MTU
) {
1591 bp
->flags
|= BNXT_FLAG_JUMBO
;
1592 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
|=
1593 DEV_RX_OFFLOAD_JUMBO_FRAME
;
1595 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
&=
1596 ~DEV_RX_OFFLOAD_JUMBO_FRAME
;
1597 bp
->flags
&= ~BNXT_FLAG_JUMBO
;
1600 eth_dev
->data
->dev_conf
.rxmode
.max_rx_pkt_len
=
1601 new_mtu
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
+ VLAN_TAG_SIZE
* 2;
1603 eth_dev
->data
->mtu
= new_mtu
;
1604 PMD_DRV_LOG(INFO
, "New MTU is %d\n", eth_dev
->data
->mtu
);
1606 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1607 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
1610 vnic
->mru
= bp
->eth_dev
->data
->mtu
+ ETHER_HDR_LEN
+
1611 ETHER_CRC_LEN
+ VLAN_TAG_SIZE
* 2;
1612 rc
= bnxt_hwrm_vnic_cfg(bp
, vnic
);
1616 size
= rte_pktmbuf_data_room_size(bp
->rx_queues
[0]->mb_pool
);
1617 size
-= RTE_PKTMBUF_HEADROOM
;
1619 if (size
< new_mtu
) {
1620 rc
= bnxt_hwrm_vnic_plcmode_cfg(bp
, vnic
);
1630 bnxt_vlan_pvid_set_op(struct rte_eth_dev
*dev
, uint16_t pvid
, int on
)
1632 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1633 uint16_t vlan
= bp
->vlan
;
1636 if (!BNXT_SINGLE_PF(bp
) || BNXT_VF(bp
)) {
1638 "PVID cannot be modified for this function\n");
1641 bp
->vlan
= on
? pvid
: 0;
1643 rc
= bnxt_hwrm_set_default_vlan(bp
, 0, 0);
1650 bnxt_dev_led_on_op(struct rte_eth_dev
*dev
)
1652 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1654 return bnxt_hwrm_port_led_cfg(bp
, true);
1658 bnxt_dev_led_off_op(struct rte_eth_dev
*dev
)
1660 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1662 return bnxt_hwrm_port_led_cfg(bp
, false);
1666 bnxt_rx_queue_count_op(struct rte_eth_dev
*dev
, uint16_t rx_queue_id
)
1668 uint32_t desc
= 0, raw_cons
= 0, cons
;
1669 struct bnxt_cp_ring_info
*cpr
;
1670 struct bnxt_rx_queue
*rxq
;
1671 struct rx_pkt_cmpl
*rxcmp
;
1676 rxq
= dev
->data
->rx_queues
[rx_queue_id
];
1680 while (raw_cons
< rxq
->nb_rx_desc
) {
1681 cons
= RING_CMP(cpr
->cp_ring_struct
, raw_cons
);
1682 rxcmp
= (struct rx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1684 if (!CMPL_VALID(rxcmp
, valid
))
1686 valid
= FLIP_VALID(cons
, cpr
->cp_ring_struct
->ring_mask
, valid
);
1687 cmp_type
= CMP_TYPE(rxcmp
);
1688 if (cmp_type
== RX_TPA_END_CMPL_TYPE_RX_TPA_END
) {
1689 cmp
= (rte_le_to_cpu_32(
1690 ((struct rx_tpa_end_cmpl
*)
1691 (rxcmp
))->agg_bufs_v1
) &
1692 RX_TPA_END_CMPL_AGG_BUFS_MASK
) >>
1693 RX_TPA_END_CMPL_AGG_BUFS_SFT
;
1695 } else if (cmp_type
== 0x11) {
1697 cmp
= (rxcmp
->agg_bufs_v1
&
1698 RX_PKT_CMPL_AGG_BUFS_MASK
) >>
1699 RX_PKT_CMPL_AGG_BUFS_SFT
;
1704 raw_cons
+= cmp
? cmp
: 2;
1711 bnxt_rx_descriptor_status_op(void *rx_queue
, uint16_t offset
)
1713 struct bnxt_rx_queue
*rxq
= (struct bnxt_rx_queue
*)rx_queue
;
1714 struct bnxt_rx_ring_info
*rxr
;
1715 struct bnxt_cp_ring_info
*cpr
;
1716 struct bnxt_sw_rx_bd
*rx_buf
;
1717 struct rx_pkt_cmpl
*rxcmp
;
1718 uint32_t cons
, cp_cons
;
1726 if (offset
>= rxq
->nb_rx_desc
)
1729 cons
= RING_CMP(cpr
->cp_ring_struct
, offset
);
1730 cp_cons
= cpr
->cp_raw_cons
;
1731 rxcmp
= (struct rx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1733 if (cons
> cp_cons
) {
1734 if (CMPL_VALID(rxcmp
, cpr
->valid
))
1735 return RTE_ETH_RX_DESC_DONE
;
1737 if (CMPL_VALID(rxcmp
, !cpr
->valid
))
1738 return RTE_ETH_RX_DESC_DONE
;
1740 rx_buf
= &rxr
->rx_buf_ring
[cons
];
1741 if (rx_buf
->mbuf
== NULL
)
1742 return RTE_ETH_RX_DESC_UNAVAIL
;
1745 return RTE_ETH_RX_DESC_AVAIL
;
1749 bnxt_tx_descriptor_status_op(void *tx_queue
, uint16_t offset
)
1751 struct bnxt_tx_queue
*txq
= (struct bnxt_tx_queue
*)tx_queue
;
1752 struct bnxt_tx_ring_info
*txr
;
1753 struct bnxt_cp_ring_info
*cpr
;
1754 struct bnxt_sw_tx_bd
*tx_buf
;
1755 struct tx_pkt_cmpl
*txcmp
;
1756 uint32_t cons
, cp_cons
;
1764 if (offset
>= txq
->nb_tx_desc
)
1767 cons
= RING_CMP(cpr
->cp_ring_struct
, offset
);
1768 txcmp
= (struct tx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1769 cp_cons
= cpr
->cp_raw_cons
;
1771 if (cons
> cp_cons
) {
1772 if (CMPL_VALID(txcmp
, cpr
->valid
))
1773 return RTE_ETH_TX_DESC_UNAVAIL
;
1775 if (CMPL_VALID(txcmp
, !cpr
->valid
))
1776 return RTE_ETH_TX_DESC_UNAVAIL
;
1778 tx_buf
= &txr
->tx_buf_ring
[cons
];
1779 if (tx_buf
->mbuf
== NULL
)
1780 return RTE_ETH_TX_DESC_DONE
;
1782 return RTE_ETH_TX_DESC_FULL
;
1785 static struct bnxt_filter_info
*
1786 bnxt_match_and_validate_ether_filter(struct bnxt
*bp
,
1787 struct rte_eth_ethertype_filter
*efilter
,
1788 struct bnxt_vnic_info
*vnic0
,
1789 struct bnxt_vnic_info
*vnic
,
1792 struct bnxt_filter_info
*mfilter
= NULL
;
1796 if (efilter
->ether_type
== ETHER_TYPE_IPv4
||
1797 efilter
->ether_type
== ETHER_TYPE_IPv6
) {
1798 PMD_DRV_LOG(ERR
, "invalid ether_type(0x%04x) in"
1799 " ethertype filter.", efilter
->ether_type
);
1803 if (efilter
->queue
>= bp
->rx_nr_rings
) {
1804 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", efilter
->queue
);
1809 vnic0
= &bp
->vnic_info
[0];
1810 vnic
= &bp
->vnic_info
[efilter
->queue
];
1812 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", efilter
->queue
);
1817 if (efilter
->flags
& RTE_ETHTYPE_FLAGS_DROP
) {
1818 STAILQ_FOREACH(mfilter
, &vnic0
->filter
, next
) {
1819 if ((!memcmp(efilter
->mac_addr
.addr_bytes
,
1820 mfilter
->l2_addr
, ETHER_ADDR_LEN
) &&
1822 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP
&&
1823 mfilter
->ethertype
== efilter
->ether_type
)) {
1829 STAILQ_FOREACH(mfilter
, &vnic
->filter
, next
)
1830 if ((!memcmp(efilter
->mac_addr
.addr_bytes
,
1831 mfilter
->l2_addr
, ETHER_ADDR_LEN
) &&
1832 mfilter
->ethertype
== efilter
->ether_type
&&
1834 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
)) {
1848 bnxt_ethertype_filter(struct rte_eth_dev
*dev
,
1849 enum rte_filter_op filter_op
,
1852 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1853 struct rte_eth_ethertype_filter
*efilter
=
1854 (struct rte_eth_ethertype_filter
*)arg
;
1855 struct bnxt_filter_info
*bfilter
, *filter1
;
1856 struct bnxt_vnic_info
*vnic
, *vnic0
;
1859 if (filter_op
== RTE_ETH_FILTER_NOP
)
1863 PMD_DRV_LOG(ERR
, "arg shouldn't be NULL for operation %u.",
1868 vnic0
= &bp
->vnic_info
[0];
1869 vnic
= &bp
->vnic_info
[efilter
->queue
];
1871 switch (filter_op
) {
1872 case RTE_ETH_FILTER_ADD
:
1873 bnxt_match_and_validate_ether_filter(bp
, efilter
,
1878 bfilter
= bnxt_get_unused_filter(bp
);
1879 if (bfilter
== NULL
) {
1881 "Not enough resources for a new filter.\n");
1884 bfilter
->filter_type
= HWRM_CFA_NTUPLE_FILTER
;
1885 memcpy(bfilter
->l2_addr
, efilter
->mac_addr
.addr_bytes
,
1887 memcpy(bfilter
->dst_macaddr
, efilter
->mac_addr
.addr_bytes
,
1889 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR
;
1890 bfilter
->ethertype
= efilter
->ether_type
;
1891 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
1893 filter1
= bnxt_get_l2_filter(bp
, bfilter
, vnic0
);
1894 if (filter1
== NULL
) {
1899 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
;
1900 bfilter
->fw_l2_filter_id
= filter1
->fw_l2_filter_id
;
1902 bfilter
->dst_id
= vnic
->fw_vnic_id
;
1904 if (efilter
->flags
& RTE_ETHTYPE_FLAGS_DROP
) {
1906 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP
;
1909 ret
= bnxt_hwrm_set_ntuple_filter(bp
, bfilter
->dst_id
, bfilter
);
1912 STAILQ_INSERT_TAIL(&vnic
->filter
, bfilter
, next
);
1914 case RTE_ETH_FILTER_DELETE
:
1915 filter1
= bnxt_match_and_validate_ether_filter(bp
, efilter
,
1917 if (ret
== -EEXIST
) {
1918 ret
= bnxt_hwrm_clear_ntuple_filter(bp
, filter1
);
1920 STAILQ_REMOVE(&vnic
->filter
, filter1
, bnxt_filter_info
,
1922 bnxt_free_filter(bp
, filter1
);
1923 } else if (ret
== 0) {
1924 PMD_DRV_LOG(ERR
, "No matching filter found\n");
1928 PMD_DRV_LOG(ERR
, "unsupported operation %u.", filter_op
);
1934 bnxt_free_filter(bp
, bfilter
);
1940 parse_ntuple_filter(struct bnxt
*bp
,
1941 struct rte_eth_ntuple_filter
*nfilter
,
1942 struct bnxt_filter_info
*bfilter
)
1946 if (nfilter
->queue
>= bp
->rx_nr_rings
) {
1947 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", nfilter
->queue
);
1951 switch (nfilter
->dst_port_mask
) {
1953 bfilter
->dst_port_mask
= -1;
1954 bfilter
->dst_port
= nfilter
->dst_port
;
1955 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
|
1956 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
1959 PMD_DRV_LOG(ERR
, "invalid dst_port mask.");
1963 bfilter
->ip_addr_type
= NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
;
1964 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
1966 switch (nfilter
->proto_mask
) {
1968 if (nfilter
->proto
== 17) /* IPPROTO_UDP */
1969 bfilter
->ip_protocol
= 17;
1970 else if (nfilter
->proto
== 6) /* IPPROTO_TCP */
1971 bfilter
->ip_protocol
= 6;
1974 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
1977 PMD_DRV_LOG(ERR
, "invalid protocol mask.");
1981 switch (nfilter
->dst_ip_mask
) {
1983 bfilter
->dst_ipaddr_mask
[0] = -1;
1984 bfilter
->dst_ipaddr
[0] = nfilter
->dst_ip
;
1985 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
|
1986 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
1989 PMD_DRV_LOG(ERR
, "invalid dst_ip mask.");
1993 switch (nfilter
->src_ip_mask
) {
1995 bfilter
->src_ipaddr_mask
[0] = -1;
1996 bfilter
->src_ipaddr
[0] = nfilter
->src_ip
;
1997 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
|
1998 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2001 PMD_DRV_LOG(ERR
, "invalid src_ip mask.");
2005 switch (nfilter
->src_port_mask
) {
2007 bfilter
->src_port_mask
= -1;
2008 bfilter
->src_port
= nfilter
->src_port
;
2009 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
|
2010 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2013 PMD_DRV_LOG(ERR
, "invalid src_port mask.");
2018 //nfilter->priority = (uint8_t)filter->priority;
2020 bfilter
->enables
= en
;
2024 static struct bnxt_filter_info
*
2025 bnxt_match_ntuple_filter(struct bnxt
*bp
,
2026 struct bnxt_filter_info
*bfilter
,
2027 struct bnxt_vnic_info
**mvnic
)
2029 struct bnxt_filter_info
*mfilter
= NULL
;
2032 for (i
= bp
->nr_vnics
- 1; i
>= 0; i
--) {
2033 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
2034 STAILQ_FOREACH(mfilter
, &vnic
->filter
, next
) {
2035 if (bfilter
->src_ipaddr
[0] == mfilter
->src_ipaddr
[0] &&
2036 bfilter
->src_ipaddr_mask
[0] ==
2037 mfilter
->src_ipaddr_mask
[0] &&
2038 bfilter
->src_port
== mfilter
->src_port
&&
2039 bfilter
->src_port_mask
== mfilter
->src_port_mask
&&
2040 bfilter
->dst_ipaddr
[0] == mfilter
->dst_ipaddr
[0] &&
2041 bfilter
->dst_ipaddr_mask
[0] ==
2042 mfilter
->dst_ipaddr_mask
[0] &&
2043 bfilter
->dst_port
== mfilter
->dst_port
&&
2044 bfilter
->dst_port_mask
== mfilter
->dst_port_mask
&&
2045 bfilter
->flags
== mfilter
->flags
&&
2046 bfilter
->enables
== mfilter
->enables
) {
2057 bnxt_cfg_ntuple_filter(struct bnxt
*bp
,
2058 struct rte_eth_ntuple_filter
*nfilter
,
2059 enum rte_filter_op filter_op
)
2061 struct bnxt_filter_info
*bfilter
, *mfilter
, *filter1
;
2062 struct bnxt_vnic_info
*vnic
, *vnic0
, *mvnic
;
2065 if (nfilter
->flags
!= RTE_5TUPLE_FLAGS
) {
2066 PMD_DRV_LOG(ERR
, "only 5tuple is supported.");
2070 if (nfilter
->flags
& RTE_NTUPLE_FLAGS_TCP_FLAG
) {
2071 PMD_DRV_LOG(ERR
, "Ntuple filter: TCP flags not supported\n");
2075 bfilter
= bnxt_get_unused_filter(bp
);
2076 if (bfilter
== NULL
) {
2078 "Not enough resources for a new filter.\n");
2081 ret
= parse_ntuple_filter(bp
, nfilter
, bfilter
);
2085 vnic
= &bp
->vnic_info
[nfilter
->queue
];
2086 vnic0
= &bp
->vnic_info
[0];
2087 filter1
= STAILQ_FIRST(&vnic0
->filter
);
2088 if (filter1
== NULL
) {
2093 bfilter
->dst_id
= vnic
->fw_vnic_id
;
2094 bfilter
->fw_l2_filter_id
= filter1
->fw_l2_filter_id
;
2096 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
;
2097 bfilter
->ethertype
= 0x800;
2098 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2100 mfilter
= bnxt_match_ntuple_filter(bp
, bfilter
, &mvnic
);
2102 if (mfilter
!= NULL
&& filter_op
== RTE_ETH_FILTER_ADD
&&
2103 bfilter
->dst_id
== mfilter
->dst_id
) {
2104 PMD_DRV_LOG(ERR
, "filter exists.\n");
2107 } else if (mfilter
!= NULL
&& filter_op
== RTE_ETH_FILTER_ADD
&&
2108 bfilter
->dst_id
!= mfilter
->dst_id
) {
2109 mfilter
->dst_id
= vnic
->fw_vnic_id
;
2110 ret
= bnxt_hwrm_set_ntuple_filter(bp
, mfilter
->dst_id
, mfilter
);
2111 STAILQ_REMOVE(&mvnic
->filter
, mfilter
, bnxt_filter_info
, next
);
2112 STAILQ_INSERT_TAIL(&vnic
->filter
, mfilter
, next
);
2113 PMD_DRV_LOG(ERR
, "filter with matching pattern exists.\n");
2114 PMD_DRV_LOG(ERR
, " Updated it to the new destination queue\n");
2117 if (mfilter
== NULL
&& filter_op
== RTE_ETH_FILTER_DELETE
) {
2118 PMD_DRV_LOG(ERR
, "filter doesn't exist.");
2123 if (filter_op
== RTE_ETH_FILTER_ADD
) {
2124 bfilter
->filter_type
= HWRM_CFA_NTUPLE_FILTER
;
2125 ret
= bnxt_hwrm_set_ntuple_filter(bp
, bfilter
->dst_id
, bfilter
);
2128 STAILQ_INSERT_TAIL(&vnic
->filter
, bfilter
, next
);
2130 if (mfilter
== NULL
) {
2131 /* This should not happen. But for Coverity! */
2135 ret
= bnxt_hwrm_clear_ntuple_filter(bp
, mfilter
);
2137 STAILQ_REMOVE(&vnic
->filter
, mfilter
, bnxt_filter_info
, next
);
2138 bnxt_free_filter(bp
, mfilter
);
2139 mfilter
->fw_l2_filter_id
= -1;
2140 bnxt_free_filter(bp
, bfilter
);
2141 bfilter
->fw_l2_filter_id
= -1;
2146 bfilter
->fw_l2_filter_id
= -1;
2147 bnxt_free_filter(bp
, bfilter
);
2152 bnxt_ntuple_filter(struct rte_eth_dev
*dev
,
2153 enum rte_filter_op filter_op
,
2156 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2159 if (filter_op
== RTE_ETH_FILTER_NOP
)
2163 PMD_DRV_LOG(ERR
, "arg shouldn't be NULL for operation %u.",
2168 switch (filter_op
) {
2169 case RTE_ETH_FILTER_ADD
:
2170 ret
= bnxt_cfg_ntuple_filter(bp
,
2171 (struct rte_eth_ntuple_filter
*)arg
,
2174 case RTE_ETH_FILTER_DELETE
:
2175 ret
= bnxt_cfg_ntuple_filter(bp
,
2176 (struct rte_eth_ntuple_filter
*)arg
,
2180 PMD_DRV_LOG(ERR
, "unsupported operation %u.", filter_op
);
2188 bnxt_parse_fdir_filter(struct bnxt
*bp
,
2189 struct rte_eth_fdir_filter
*fdir
,
2190 struct bnxt_filter_info
*filter
)
2192 enum rte_fdir_mode fdir_mode
=
2193 bp
->eth_dev
->data
->dev_conf
.fdir_conf
.mode
;
2194 struct bnxt_vnic_info
*vnic0
, *vnic
;
2195 struct bnxt_filter_info
*filter1
;
2199 if (fdir_mode
== RTE_FDIR_MODE_PERFECT_TUNNEL
)
2202 filter
->l2_ovlan
= fdir
->input
.flow_ext
.vlan_tci
;
2203 en
|= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID
;
2205 switch (fdir
->input
.flow_type
) {
2206 case RTE_ETH_FLOW_IPV4
:
2207 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER
:
2209 filter
->src_ipaddr
[0] = fdir
->input
.flow
.ip4_flow
.src_ip
;
2210 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2211 filter
->dst_ipaddr
[0] = fdir
->input
.flow
.ip4_flow
.dst_ip
;
2212 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2213 filter
->ip_protocol
= fdir
->input
.flow
.ip4_flow
.proto
;
2214 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2215 filter
->ip_addr_type
=
2216 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
;
2217 filter
->src_ipaddr_mask
[0] = 0xffffffff;
2218 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2219 filter
->dst_ipaddr_mask
[0] = 0xffffffff;
2220 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2221 filter
->ethertype
= 0x800;
2222 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2224 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP
:
2225 filter
->src_port
= fdir
->input
.flow
.tcp4_flow
.src_port
;
2226 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
;
2227 filter
->dst_port
= fdir
->input
.flow
.tcp4_flow
.dst_port
;
2228 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
;
2229 filter
->dst_port_mask
= 0xffff;
2230 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
2231 filter
->src_port_mask
= 0xffff;
2232 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2233 filter
->src_ipaddr
[0] = fdir
->input
.flow
.tcp4_flow
.ip
.src_ip
;
2234 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2235 filter
->dst_ipaddr
[0] = fdir
->input
.flow
.tcp4_flow
.ip
.dst_ip
;
2236 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2237 filter
->ip_protocol
= 6;
2238 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2239 filter
->ip_addr_type
=
2240 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
;
2241 filter
->src_ipaddr_mask
[0] = 0xffffffff;
2242 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2243 filter
->dst_ipaddr_mask
[0] = 0xffffffff;
2244 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2245 filter
->ethertype
= 0x800;
2246 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2248 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP
:
2249 filter
->src_port
= fdir
->input
.flow
.udp4_flow
.src_port
;
2250 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
;
2251 filter
->dst_port
= fdir
->input
.flow
.udp4_flow
.dst_port
;
2252 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
;
2253 filter
->dst_port_mask
= 0xffff;
2254 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
2255 filter
->src_port_mask
= 0xffff;
2256 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2257 filter
->src_ipaddr
[0] = fdir
->input
.flow
.udp4_flow
.ip
.src_ip
;
2258 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2259 filter
->dst_ipaddr
[0] = fdir
->input
.flow
.udp4_flow
.ip
.dst_ip
;
2260 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2261 filter
->ip_protocol
= 17;
2262 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2263 filter
->ip_addr_type
=
2264 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
;
2265 filter
->src_ipaddr_mask
[0] = 0xffffffff;
2266 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2267 filter
->dst_ipaddr_mask
[0] = 0xffffffff;
2268 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2269 filter
->ethertype
= 0x800;
2270 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2272 case RTE_ETH_FLOW_IPV6
:
2273 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER
:
2275 filter
->ip_addr_type
=
2276 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
;
2277 filter
->ip_protocol
= fdir
->input
.flow
.ipv6_flow
.proto
;
2278 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2279 rte_memcpy(filter
->src_ipaddr
,
2280 fdir
->input
.flow
.ipv6_flow
.src_ip
, 16);
2281 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2282 rte_memcpy(filter
->dst_ipaddr
,
2283 fdir
->input
.flow
.ipv6_flow
.dst_ip
, 16);
2284 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2285 memset(filter
->dst_ipaddr_mask
, 0xff, 16);
2286 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2287 memset(filter
->src_ipaddr_mask
, 0xff, 16);
2288 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2289 filter
->ethertype
= 0x86dd;
2290 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2292 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP
:
2293 filter
->src_port
= fdir
->input
.flow
.tcp6_flow
.src_port
;
2294 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
;
2295 filter
->dst_port
= fdir
->input
.flow
.tcp6_flow
.dst_port
;
2296 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
;
2297 filter
->dst_port_mask
= 0xffff;
2298 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
2299 filter
->src_port_mask
= 0xffff;
2300 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2301 filter
->ip_addr_type
=
2302 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
;
2303 filter
->ip_protocol
= fdir
->input
.flow
.tcp6_flow
.ip
.proto
;
2304 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2305 rte_memcpy(filter
->src_ipaddr
,
2306 fdir
->input
.flow
.tcp6_flow
.ip
.src_ip
, 16);
2307 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2308 rte_memcpy(filter
->dst_ipaddr
,
2309 fdir
->input
.flow
.tcp6_flow
.ip
.dst_ip
, 16);
2310 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2311 memset(filter
->dst_ipaddr_mask
, 0xff, 16);
2312 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2313 memset(filter
->src_ipaddr_mask
, 0xff, 16);
2314 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2315 filter
->ethertype
= 0x86dd;
2316 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2318 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP
:
2319 filter
->src_port
= fdir
->input
.flow
.udp6_flow
.src_port
;
2320 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
;
2321 filter
->dst_port
= fdir
->input
.flow
.udp6_flow
.dst_port
;
2322 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
;
2323 filter
->dst_port_mask
= 0xffff;
2324 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
2325 filter
->src_port_mask
= 0xffff;
2326 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2327 filter
->ip_addr_type
=
2328 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
;
2329 filter
->ip_protocol
= fdir
->input
.flow
.udp6_flow
.ip
.proto
;
2330 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
2331 rte_memcpy(filter
->src_ipaddr
,
2332 fdir
->input
.flow
.udp6_flow
.ip
.src_ip
, 16);
2333 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
;
2334 rte_memcpy(filter
->dst_ipaddr
,
2335 fdir
->input
.flow
.udp6_flow
.ip
.dst_ip
, 16);
2336 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
;
2337 memset(filter
->dst_ipaddr_mask
, 0xff, 16);
2338 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
2339 memset(filter
->src_ipaddr_mask
, 0xff, 16);
2340 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2341 filter
->ethertype
= 0x86dd;
2342 filter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2344 case RTE_ETH_FLOW_L2_PAYLOAD
:
2345 filter
->ethertype
= fdir
->input
.flow
.l2_flow
.ether_type
;
2346 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2348 case RTE_ETH_FLOW_VXLAN
:
2349 if (fdir
->action
.behavior
== RTE_ETH_FDIR_REJECT
)
2351 filter
->vni
= fdir
->input
.flow
.tunnel_flow
.tunnel_id
;
2352 filter
->tunnel_type
=
2353 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN
;
2354 en
|= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE
;
2356 case RTE_ETH_FLOW_NVGRE
:
2357 if (fdir
->action
.behavior
== RTE_ETH_FDIR_REJECT
)
2359 filter
->vni
= fdir
->input
.flow
.tunnel_flow
.tunnel_id
;
2360 filter
->tunnel_type
=
2361 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE
;
2362 en
|= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE
;
2364 case RTE_ETH_FLOW_UNKNOWN
:
2365 case RTE_ETH_FLOW_RAW
:
2366 case RTE_ETH_FLOW_FRAG_IPV4
:
2367 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
:
2368 case RTE_ETH_FLOW_FRAG_IPV6
:
2369 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP
:
2370 case RTE_ETH_FLOW_IPV6_EX
:
2371 case RTE_ETH_FLOW_IPV6_TCP_EX
:
2372 case RTE_ETH_FLOW_IPV6_UDP_EX
:
2373 case RTE_ETH_FLOW_GENEVE
:
2379 vnic0
= &bp
->vnic_info
[0];
2380 vnic
= &bp
->vnic_info
[fdir
->action
.rx_queue
];
2382 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", fdir
->action
.rx_queue
);
2387 if (fdir_mode
== RTE_FDIR_MODE_PERFECT_MAC_VLAN
) {
2388 rte_memcpy(filter
->dst_macaddr
,
2389 fdir
->input
.flow
.mac_vlan_flow
.mac_addr
.addr_bytes
, 6);
2390 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR
;
2393 if (fdir
->action
.behavior
== RTE_ETH_FDIR_REJECT
) {
2394 filter
->flags
= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP
;
2395 filter1
= STAILQ_FIRST(&vnic0
->filter
);
2396 //filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
2398 filter
->dst_id
= vnic
->fw_vnic_id
;
2399 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++)
2400 if (filter
->dst_macaddr
[i
] == 0x00)
2401 filter1
= STAILQ_FIRST(&vnic0
->filter
);
2403 filter1
= bnxt_get_l2_filter(bp
, filter
, vnic
);
2406 if (filter1
== NULL
)
2409 en
|= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
;
2410 filter
->fw_l2_filter_id
= filter1
->fw_l2_filter_id
;
2412 filter
->enables
= en
;
2417 static struct bnxt_filter_info
*
2418 bnxt_match_fdir(struct bnxt
*bp
, struct bnxt_filter_info
*nf
,
2419 struct bnxt_vnic_info
**mvnic
)
2421 struct bnxt_filter_info
*mf
= NULL
;
2424 for (i
= bp
->nr_vnics
- 1; i
>= 0; i
--) {
2425 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
2427 STAILQ_FOREACH(mf
, &vnic
->filter
, next
) {
2428 if (mf
->filter_type
== nf
->filter_type
&&
2429 mf
->flags
== nf
->flags
&&
2430 mf
->src_port
== nf
->src_port
&&
2431 mf
->src_port_mask
== nf
->src_port_mask
&&
2432 mf
->dst_port
== nf
->dst_port
&&
2433 mf
->dst_port_mask
== nf
->dst_port_mask
&&
2434 mf
->ip_protocol
== nf
->ip_protocol
&&
2435 mf
->ip_addr_type
== nf
->ip_addr_type
&&
2436 mf
->ethertype
== nf
->ethertype
&&
2437 mf
->vni
== nf
->vni
&&
2438 mf
->tunnel_type
== nf
->tunnel_type
&&
2439 mf
->l2_ovlan
== nf
->l2_ovlan
&&
2440 mf
->l2_ovlan_mask
== nf
->l2_ovlan_mask
&&
2441 mf
->l2_ivlan
== nf
->l2_ivlan
&&
2442 mf
->l2_ivlan_mask
== nf
->l2_ivlan_mask
&&
2443 !memcmp(mf
->l2_addr
, nf
->l2_addr
, ETHER_ADDR_LEN
) &&
2444 !memcmp(mf
->l2_addr_mask
, nf
->l2_addr_mask
,
2446 !memcmp(mf
->src_macaddr
, nf
->src_macaddr
,
2448 !memcmp(mf
->dst_macaddr
, nf
->dst_macaddr
,
2450 !memcmp(mf
->src_ipaddr
, nf
->src_ipaddr
,
2451 sizeof(nf
->src_ipaddr
)) &&
2452 !memcmp(mf
->src_ipaddr_mask
, nf
->src_ipaddr_mask
,
2453 sizeof(nf
->src_ipaddr_mask
)) &&
2454 !memcmp(mf
->dst_ipaddr
, nf
->dst_ipaddr
,
2455 sizeof(nf
->dst_ipaddr
)) &&
2456 !memcmp(mf
->dst_ipaddr_mask
, nf
->dst_ipaddr_mask
,
2457 sizeof(nf
->dst_ipaddr_mask
))) {
2468 bnxt_fdir_filter(struct rte_eth_dev
*dev
,
2469 enum rte_filter_op filter_op
,
2472 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2473 struct rte_eth_fdir_filter
*fdir
= (struct rte_eth_fdir_filter
*)arg
;
2474 struct bnxt_filter_info
*filter
, *match
;
2475 struct bnxt_vnic_info
*vnic
, *mvnic
;
2478 if (filter_op
== RTE_ETH_FILTER_NOP
)
2481 if (arg
== NULL
&& filter_op
!= RTE_ETH_FILTER_FLUSH
)
2484 switch (filter_op
) {
2485 case RTE_ETH_FILTER_ADD
:
2486 case RTE_ETH_FILTER_DELETE
:
2488 filter
= bnxt_get_unused_filter(bp
);
2489 if (filter
== NULL
) {
2491 "Not enough resources for a new flow.\n");
2495 ret
= bnxt_parse_fdir_filter(bp
, fdir
, filter
);
2498 filter
->filter_type
= HWRM_CFA_NTUPLE_FILTER
;
2500 if (fdir
->action
.behavior
== RTE_ETH_FDIR_REJECT
)
2501 vnic
= &bp
->vnic_info
[0];
2503 vnic
= &bp
->vnic_info
[fdir
->action
.rx_queue
];
2505 match
= bnxt_match_fdir(bp
, filter
, &mvnic
);
2506 if (match
!= NULL
&& filter_op
== RTE_ETH_FILTER_ADD
) {
2507 if (match
->dst_id
== vnic
->fw_vnic_id
) {
2508 PMD_DRV_LOG(ERR
, "Flow already exists.\n");
2512 match
->dst_id
= vnic
->fw_vnic_id
;
2513 ret
= bnxt_hwrm_set_ntuple_filter(bp
,
2516 STAILQ_REMOVE(&mvnic
->filter
, match
,
2517 bnxt_filter_info
, next
);
2518 STAILQ_INSERT_TAIL(&vnic
->filter
, match
, next
);
2520 "Filter with matching pattern exist\n");
2522 "Updated it to new destination q\n");
2526 if (match
== NULL
&& filter_op
== RTE_ETH_FILTER_DELETE
) {
2527 PMD_DRV_LOG(ERR
, "Flow does not exist.\n");
2532 if (filter_op
== RTE_ETH_FILTER_ADD
) {
2533 ret
= bnxt_hwrm_set_ntuple_filter(bp
,
2538 STAILQ_INSERT_TAIL(&vnic
->filter
, filter
, next
);
2540 ret
= bnxt_hwrm_clear_ntuple_filter(bp
, match
);
2541 STAILQ_REMOVE(&vnic
->filter
, match
,
2542 bnxt_filter_info
, next
);
2543 bnxt_free_filter(bp
, match
);
2544 filter
->fw_l2_filter_id
= -1;
2545 bnxt_free_filter(bp
, filter
);
2548 case RTE_ETH_FILTER_FLUSH
:
2549 for (i
= bp
->nr_vnics
- 1; i
>= 0; i
--) {
2550 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
2552 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
2553 if (filter
->filter_type
==
2554 HWRM_CFA_NTUPLE_FILTER
) {
2556 bnxt_hwrm_clear_ntuple_filter(bp
,
2558 STAILQ_REMOVE(&vnic
->filter
, filter
,
2559 bnxt_filter_info
, next
);
2564 case RTE_ETH_FILTER_UPDATE
:
2565 case RTE_ETH_FILTER_STATS
:
2566 case RTE_ETH_FILTER_INFO
:
2567 PMD_DRV_LOG(ERR
, "operation %u not implemented", filter_op
);
2570 PMD_DRV_LOG(ERR
, "unknown operation %u", filter_op
);
2577 filter
->fw_l2_filter_id
= -1;
2578 bnxt_free_filter(bp
, filter
);
2583 bnxt_filter_ctrl_op(struct rte_eth_dev
*dev __rte_unused
,
2584 enum rte_filter_type filter_type
,
2585 enum rte_filter_op filter_op
, void *arg
)
2589 switch (filter_type
) {
2590 case RTE_ETH_FILTER_TUNNEL
:
2592 "filter type: %d: To be implemented\n", filter_type
);
2594 case RTE_ETH_FILTER_FDIR
:
2595 ret
= bnxt_fdir_filter(dev
, filter_op
, arg
);
2597 case RTE_ETH_FILTER_NTUPLE
:
2598 ret
= bnxt_ntuple_filter(dev
, filter_op
, arg
);
2600 case RTE_ETH_FILTER_ETHERTYPE
:
2601 ret
= bnxt_ethertype_filter(dev
, filter_op
, arg
);
2603 case RTE_ETH_FILTER_GENERIC
:
2604 if (filter_op
!= RTE_ETH_FILTER_GET
)
2606 *(const void **)arg
= &bnxt_flow_ops
;
2610 "Filter type (%d) not supported", filter_type
);
2617 static const uint32_t *
2618 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev
*dev
)
2620 static const uint32_t ptypes
[] = {
2621 RTE_PTYPE_L2_ETHER_VLAN
,
2622 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
,
2623 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
,
2627 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN
,
2628 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN
,
2629 RTE_PTYPE_INNER_L4_ICMP
,
2630 RTE_PTYPE_INNER_L4_TCP
,
2631 RTE_PTYPE_INNER_L4_UDP
,
2635 if (dev
->rx_pkt_burst
== bnxt_recv_pkts
)
2640 static int bnxt_map_regs(struct bnxt
*bp
, uint32_t *reg_arr
, int count
,
2643 uint32_t reg_base
= *reg_arr
& 0xfffff000;
2647 for (i
= 0; i
< count
; i
++) {
2648 if ((reg_arr
[i
] & 0xfffff000) != reg_base
)
2651 win_off
= BNXT_GRCPF_REG_WINDOW_BASE_OUT
+ (reg_win
- 1) * 4;
2652 rte_write32(reg_base
, (uint8_t *)bp
->bar0
+ win_off
);
2656 static int bnxt_map_ptp_regs(struct bnxt
*bp
)
2658 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2662 reg_arr
= ptp
->rx_regs
;
2663 rc
= bnxt_map_regs(bp
, reg_arr
, BNXT_PTP_RX_REGS
, 5);
2667 reg_arr
= ptp
->tx_regs
;
2668 rc
= bnxt_map_regs(bp
, reg_arr
, BNXT_PTP_TX_REGS
, 6);
2672 for (i
= 0; i
< BNXT_PTP_RX_REGS
; i
++)
2673 ptp
->rx_mapped_regs
[i
] = 0x5000 + (ptp
->rx_regs
[i
] & 0xfff);
2675 for (i
= 0; i
< BNXT_PTP_TX_REGS
; i
++)
2676 ptp
->tx_mapped_regs
[i
] = 0x6000 + (ptp
->tx_regs
[i
] & 0xfff);
2681 static void bnxt_unmap_ptp_regs(struct bnxt
*bp
)
2683 rte_write32(0, (uint8_t *)bp
->bar0
+
2684 BNXT_GRCPF_REG_WINDOW_BASE_OUT
+ 16);
2685 rte_write32(0, (uint8_t *)bp
->bar0
+
2686 BNXT_GRCPF_REG_WINDOW_BASE_OUT
+ 20);
2689 static uint64_t bnxt_cc_read(struct bnxt
*bp
)
2693 ns
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2694 BNXT_GRCPF_REG_SYNC_TIME
));
2695 ns
|= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2696 BNXT_GRCPF_REG_SYNC_TIME
+ 4))) << 32;
2700 static int bnxt_get_tx_ts(struct bnxt
*bp
, uint64_t *ts
)
2702 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2705 fifo
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2706 ptp
->tx_mapped_regs
[BNXT_PTP_TX_FIFO
]));
2707 if (fifo
& BNXT_PTP_TX_FIFO_EMPTY
)
2710 fifo
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2711 ptp
->tx_mapped_regs
[BNXT_PTP_TX_FIFO
]));
2712 *ts
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2713 ptp
->tx_mapped_regs
[BNXT_PTP_TX_TS_L
]));
2714 *ts
|= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2715 ptp
->tx_mapped_regs
[BNXT_PTP_TX_TS_H
])) << 32;
2720 static int bnxt_get_rx_ts(struct bnxt
*bp
, uint64_t *ts
)
2722 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2723 struct bnxt_pf_info
*pf
= &bp
->pf
;
2730 fifo
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2731 ptp
->rx_mapped_regs
[BNXT_PTP_RX_FIFO
]));
2732 if (!(fifo
& BNXT_PTP_RX_FIFO_PENDING
))
2735 port_id
= pf
->port_id
;
2736 rte_write32(1 << port_id
, (uint8_t *)bp
->bar0
+
2737 ptp
->rx_mapped_regs
[BNXT_PTP_RX_FIFO_ADV
]);
2739 fifo
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2740 ptp
->rx_mapped_regs
[BNXT_PTP_RX_FIFO
]));
2741 if (fifo
& BNXT_PTP_RX_FIFO_PENDING
) {
2742 /* bnxt_clr_rx_ts(bp); TBD */
2746 *ts
= rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2747 ptp
->rx_mapped_regs
[BNXT_PTP_RX_TS_L
]));
2748 *ts
|= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp
->bar0
+
2749 ptp
->rx_mapped_regs
[BNXT_PTP_RX_TS_H
])) << 32;
2755 bnxt_timesync_write_time(struct rte_eth_dev
*dev
, const struct timespec
*ts
)
2758 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2759 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2764 ns
= rte_timespec_to_ns(ts
);
2765 /* Set the timecounters to a new value. */
2772 bnxt_timesync_read_time(struct rte_eth_dev
*dev
, struct timespec
*ts
)
2774 uint64_t ns
, systime_cycles
;
2775 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2776 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2781 systime_cycles
= bnxt_cc_read(bp
);
2782 ns
= rte_timecounter_update(&ptp
->tc
, systime_cycles
);
2783 *ts
= rte_ns_to_timespec(ns
);
2788 bnxt_timesync_enable(struct rte_eth_dev
*dev
)
2790 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2791 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2798 ptp
->tx_tstamp_en
= 1;
2799 ptp
->rxctl
= BNXT_PTP_MSG_EVENTS
;
2801 if (!bnxt_hwrm_ptp_cfg(bp
))
2802 bnxt_map_ptp_regs(bp
);
2804 memset(&ptp
->tc
, 0, sizeof(struct rte_timecounter
));
2805 memset(&ptp
->rx_tstamp_tc
, 0, sizeof(struct rte_timecounter
));
2806 memset(&ptp
->tx_tstamp_tc
, 0, sizeof(struct rte_timecounter
));
2808 ptp
->tc
.cc_mask
= BNXT_CYCLECOUNTER_MASK
;
2809 ptp
->tc
.cc_shift
= shift
;
2810 ptp
->tc
.nsec_mask
= (1ULL << shift
) - 1;
2812 ptp
->rx_tstamp_tc
.cc_mask
= BNXT_CYCLECOUNTER_MASK
;
2813 ptp
->rx_tstamp_tc
.cc_shift
= shift
;
2814 ptp
->rx_tstamp_tc
.nsec_mask
= (1ULL << shift
) - 1;
2816 ptp
->tx_tstamp_tc
.cc_mask
= BNXT_CYCLECOUNTER_MASK
;
2817 ptp
->tx_tstamp_tc
.cc_shift
= shift
;
2818 ptp
->tx_tstamp_tc
.nsec_mask
= (1ULL << shift
) - 1;
2824 bnxt_timesync_disable(struct rte_eth_dev
*dev
)
2826 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2827 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2833 ptp
->tx_tstamp_en
= 0;
2836 bnxt_hwrm_ptp_cfg(bp
);
2838 bnxt_unmap_ptp_regs(bp
);
2844 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev
*dev
,
2845 struct timespec
*timestamp
,
2846 uint32_t flags __rte_unused
)
2848 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2849 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2850 uint64_t rx_tstamp_cycles
= 0;
2856 bnxt_get_rx_ts(bp
, &rx_tstamp_cycles
);
2857 ns
= rte_timecounter_update(&ptp
->rx_tstamp_tc
, rx_tstamp_cycles
);
2858 *timestamp
= rte_ns_to_timespec(ns
);
2863 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev
*dev
,
2864 struct timespec
*timestamp
)
2866 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2867 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2868 uint64_t tx_tstamp_cycles
= 0;
2874 bnxt_get_tx_ts(bp
, &tx_tstamp_cycles
);
2875 ns
= rte_timecounter_update(&ptp
->tx_tstamp_tc
, tx_tstamp_cycles
);
2876 *timestamp
= rte_ns_to_timespec(ns
);
2882 bnxt_timesync_adjust_time(struct rte_eth_dev
*dev
, int64_t delta
)
2884 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2885 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
2890 ptp
->tc
.nsec
+= delta
;
2896 bnxt_get_eeprom_length_op(struct rte_eth_dev
*dev
)
2898 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2900 uint32_t dir_entries
;
2901 uint32_t entry_length
;
2903 PMD_DRV_LOG(INFO
, "%04x:%02x:%02x:%02x\n",
2904 bp
->pdev
->addr
.domain
, bp
->pdev
->addr
.bus
,
2905 bp
->pdev
->addr
.devid
, bp
->pdev
->addr
.function
);
2907 rc
= bnxt_hwrm_nvm_get_dir_info(bp
, &dir_entries
, &entry_length
);
2911 return dir_entries
* entry_length
;
2915 bnxt_get_eeprom_op(struct rte_eth_dev
*dev
,
2916 struct rte_dev_eeprom_info
*in_eeprom
)
2918 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2922 PMD_DRV_LOG(INFO
, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2923 "len = %d\n", bp
->pdev
->addr
.domain
,
2924 bp
->pdev
->addr
.bus
, bp
->pdev
->addr
.devid
,
2925 bp
->pdev
->addr
.function
, in_eeprom
->offset
, in_eeprom
->length
);
2927 if (in_eeprom
->offset
== 0) /* special offset value to get directory */
2928 return bnxt_get_nvram_directory(bp
, in_eeprom
->length
,
2931 index
= in_eeprom
->offset
>> 24;
2932 offset
= in_eeprom
->offset
& 0xffffff;
2935 return bnxt_hwrm_get_nvram_item(bp
, index
- 1, offset
,
2936 in_eeprom
->length
, in_eeprom
->data
);
2941 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type
)
2944 case BNX_DIR_TYPE_CHIMP_PATCH
:
2945 case BNX_DIR_TYPE_BOOTCODE
:
2946 case BNX_DIR_TYPE_BOOTCODE_2
:
2947 case BNX_DIR_TYPE_APE_FW
:
2948 case BNX_DIR_TYPE_APE_PATCH
:
2949 case BNX_DIR_TYPE_KONG_FW
:
2950 case BNX_DIR_TYPE_KONG_PATCH
:
2951 case BNX_DIR_TYPE_BONO_FW
:
2952 case BNX_DIR_TYPE_BONO_PATCH
:
2960 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type
)
2963 case BNX_DIR_TYPE_AVS
:
2964 case BNX_DIR_TYPE_EXP_ROM_MBA
:
2965 case BNX_DIR_TYPE_PCIE
:
2966 case BNX_DIR_TYPE_TSCF_UCODE
:
2967 case BNX_DIR_TYPE_EXT_PHY
:
2968 case BNX_DIR_TYPE_CCM
:
2969 case BNX_DIR_TYPE_ISCSI_BOOT
:
2970 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6
:
2971 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6
:
2979 static bool bnxt_dir_type_is_executable(uint16_t dir_type
)
2981 return bnxt_dir_type_is_ape_bin_format(dir_type
) ||
2982 bnxt_dir_type_is_other_exec_format(dir_type
);
2986 bnxt_set_eeprom_op(struct rte_eth_dev
*dev
,
2987 struct rte_dev_eeprom_info
*in_eeprom
)
2989 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2990 uint8_t index
, dir_op
;
2991 uint16_t type
, ext
, ordinal
, attr
;
2993 PMD_DRV_LOG(INFO
, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2994 "len = %d\n", bp
->pdev
->addr
.domain
,
2995 bp
->pdev
->addr
.bus
, bp
->pdev
->addr
.devid
,
2996 bp
->pdev
->addr
.function
, in_eeprom
->offset
, in_eeprom
->length
);
2999 PMD_DRV_LOG(ERR
, "NVM write not supported from a VF\n");
3003 type
= in_eeprom
->magic
>> 16;
3005 if (type
== 0xffff) { /* special value for directory operations */
3006 index
= in_eeprom
->magic
& 0xff;
3007 dir_op
= in_eeprom
->magic
>> 8;
3011 case 0x0e: /* erase */
3012 if (in_eeprom
->offset
!= ~in_eeprom
->magic
)
3014 return bnxt_hwrm_erase_nvram_directory(bp
, index
- 1);
3020 /* Create or re-write an NVM item: */
3021 if (bnxt_dir_type_is_executable(type
) == true)
3023 ext
= in_eeprom
->magic
& 0xffff;
3024 ordinal
= in_eeprom
->offset
>> 16;
3025 attr
= in_eeprom
->offset
& 0xffff;
3027 return bnxt_hwrm_flash_nvram(bp
, type
, ordinal
, ext
, attr
,
3028 in_eeprom
->data
, in_eeprom
->length
);
3036 static const struct eth_dev_ops bnxt_dev_ops
= {
3037 .dev_infos_get
= bnxt_dev_info_get_op
,
3038 .dev_close
= bnxt_dev_close_op
,
3039 .dev_configure
= bnxt_dev_configure_op
,
3040 .dev_start
= bnxt_dev_start_op
,
3041 .dev_stop
= bnxt_dev_stop_op
,
3042 .dev_set_link_up
= bnxt_dev_set_link_up_op
,
3043 .dev_set_link_down
= bnxt_dev_set_link_down_op
,
3044 .stats_get
= bnxt_stats_get_op
,
3045 .stats_reset
= bnxt_stats_reset_op
,
3046 .rx_queue_setup
= bnxt_rx_queue_setup_op
,
3047 .rx_queue_release
= bnxt_rx_queue_release_op
,
3048 .tx_queue_setup
= bnxt_tx_queue_setup_op
,
3049 .tx_queue_release
= bnxt_tx_queue_release_op
,
3050 .rx_queue_intr_enable
= bnxt_rx_queue_intr_enable_op
,
3051 .rx_queue_intr_disable
= bnxt_rx_queue_intr_disable_op
,
3052 .reta_update
= bnxt_reta_update_op
,
3053 .reta_query
= bnxt_reta_query_op
,
3054 .rss_hash_update
= bnxt_rss_hash_update_op
,
3055 .rss_hash_conf_get
= bnxt_rss_hash_conf_get_op
,
3056 .link_update
= bnxt_link_update_op
,
3057 .promiscuous_enable
= bnxt_promiscuous_enable_op
,
3058 .promiscuous_disable
= bnxt_promiscuous_disable_op
,
3059 .allmulticast_enable
= bnxt_allmulticast_enable_op
,
3060 .allmulticast_disable
= bnxt_allmulticast_disable_op
,
3061 .mac_addr_add
= bnxt_mac_addr_add_op
,
3062 .mac_addr_remove
= bnxt_mac_addr_remove_op
,
3063 .flow_ctrl_get
= bnxt_flow_ctrl_get_op
,
3064 .flow_ctrl_set
= bnxt_flow_ctrl_set_op
,
3065 .udp_tunnel_port_add
= bnxt_udp_tunnel_port_add_op
,
3066 .udp_tunnel_port_del
= bnxt_udp_tunnel_port_del_op
,
3067 .vlan_filter_set
= bnxt_vlan_filter_set_op
,
3068 .vlan_offload_set
= bnxt_vlan_offload_set_op
,
3069 .vlan_pvid_set
= bnxt_vlan_pvid_set_op
,
3070 .mtu_set
= bnxt_mtu_set_op
,
3071 .mac_addr_set
= bnxt_set_default_mac_addr_op
,
3072 .xstats_get
= bnxt_dev_xstats_get_op
,
3073 .xstats_get_names
= bnxt_dev_xstats_get_names_op
,
3074 .xstats_reset
= bnxt_dev_xstats_reset_op
,
3075 .fw_version_get
= bnxt_fw_version_get
,
3076 .set_mc_addr_list
= bnxt_dev_set_mc_addr_list_op
,
3077 .rxq_info_get
= bnxt_rxq_info_get_op
,
3078 .txq_info_get
= bnxt_txq_info_get_op
,
3079 .dev_led_on
= bnxt_dev_led_on_op
,
3080 .dev_led_off
= bnxt_dev_led_off_op
,
3081 .xstats_get_by_id
= bnxt_dev_xstats_get_by_id_op
,
3082 .xstats_get_names_by_id
= bnxt_dev_xstats_get_names_by_id_op
,
3083 .rx_queue_count
= bnxt_rx_queue_count_op
,
3084 .rx_descriptor_status
= bnxt_rx_descriptor_status_op
,
3085 .tx_descriptor_status
= bnxt_tx_descriptor_status_op
,
3086 .rx_queue_start
= bnxt_rx_queue_start
,
3087 .rx_queue_stop
= bnxt_rx_queue_stop
,
3088 .tx_queue_start
= bnxt_tx_queue_start
,
3089 .tx_queue_stop
= bnxt_tx_queue_stop
,
3090 .filter_ctrl
= bnxt_filter_ctrl_op
,
3091 .dev_supported_ptypes_get
= bnxt_dev_supported_ptypes_get_op
,
3092 .get_eeprom_length
= bnxt_get_eeprom_length_op
,
3093 .get_eeprom
= bnxt_get_eeprom_op
,
3094 .set_eeprom
= bnxt_set_eeprom_op
,
3095 .timesync_enable
= bnxt_timesync_enable
,
3096 .timesync_disable
= bnxt_timesync_disable
,
3097 .timesync_read_time
= bnxt_timesync_read_time
,
3098 .timesync_write_time
= bnxt_timesync_write_time
,
3099 .timesync_adjust_time
= bnxt_timesync_adjust_time
,
3100 .timesync_read_rx_timestamp
= bnxt_timesync_read_rx_timestamp
,
3101 .timesync_read_tx_timestamp
= bnxt_timesync_read_tx_timestamp
,
3104 static bool bnxt_vf_pciid(uint16_t id
)
3106 if (id
== BROADCOM_DEV_ID_57304_VF
||
3107 id
== BROADCOM_DEV_ID_57406_VF
||
3108 id
== BROADCOM_DEV_ID_5731X_VF
||
3109 id
== BROADCOM_DEV_ID_5741X_VF
||
3110 id
== BROADCOM_DEV_ID_57414_VF
||
3111 id
== BROADCOM_DEV_ID_STRATUS_NIC_VF1
||
3112 id
== BROADCOM_DEV_ID_STRATUS_NIC_VF2
||
3113 id
== BROADCOM_DEV_ID_58802_VF
)
3118 bool bnxt_stratus_device(struct bnxt
*bp
)
3120 uint16_t id
= bp
->pdev
->id
.device_id
;
3122 if (id
== BROADCOM_DEV_ID_STRATUS_NIC
||
3123 id
== BROADCOM_DEV_ID_STRATUS_NIC_VF1
||
3124 id
== BROADCOM_DEV_ID_STRATUS_NIC_VF2
)
3129 static int bnxt_init_board(struct rte_eth_dev
*eth_dev
)
3131 struct bnxt
*bp
= eth_dev
->data
->dev_private
;
3132 struct rte_pci_device
*pci_dev
= RTE_ETH_DEV_TO_PCI(eth_dev
);
3135 /* enable device (incl. PCI PM wakeup), and bus-mastering */
3136 if (!pci_dev
->mem_resource
[0].addr
) {
3138 "Cannot find PCI device base address, aborting\n");
3140 goto init_err_disable
;
3143 bp
->eth_dev
= eth_dev
;
3146 bp
->bar0
= (void *)pci_dev
->mem_resource
[0].addr
;
3148 PMD_DRV_LOG(ERR
, "Cannot map device registers, aborting\n");
3150 goto init_err_release
;
3153 if (!pci_dev
->mem_resource
[2].addr
) {
3155 "Cannot find PCI device BAR 2 address, aborting\n");
3157 goto init_err_release
;
3159 bp
->doorbell_base
= (void *)pci_dev
->mem_resource
[2].addr
;
3167 if (bp
->doorbell_base
)
3168 bp
->doorbell_base
= NULL
;
/*
 * Clear the forward-to-PF bit for HWRM command @x in the VF request-forward
 * bitmap (32 commands per word), i.e. allow VFs to issue it directly.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and stays safe inside unbraced if/else bodies (the plain { } form would
 * break `if (c) ALLOW_FUNC(x); else ...`). Evaluates @x exactly once.
 */
#define ALLOW_FUNC(x)	\
	do { \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	} while (0)
3183 bnxt_dev_init(struct rte_eth_dev
*eth_dev
)
3185 struct rte_pci_device
*pci_dev
= RTE_ETH_DEV_TO_PCI(eth_dev
);
3186 char mz_name
[RTE_MEMZONE_NAMESIZE
];
3187 const struct rte_memzone
*mz
= NULL
;
3188 static int version_printed
;
3189 uint32_t total_alloc_len
;
3190 rte_iova_t mz_phys_addr
;
3194 if (version_printed
++ == 0)
3195 PMD_DRV_LOG(INFO
, "%s\n", bnxt_version
);
3197 rte_eth_copy_pci_info(eth_dev
, pci_dev
);
3199 bp
= eth_dev
->data
->dev_private
;
3201 bp
->dev_stopped
= 1;
3203 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
3206 if (bnxt_vf_pciid(pci_dev
->id
.device_id
))
3207 bp
->flags
|= BNXT_FLAG_VF
;
3209 rc
= bnxt_init_board(eth_dev
);
3212 "Board initialization failed rc: %x\n", rc
);
3216 eth_dev
->dev_ops
= &bnxt_dev_ops
;
3217 eth_dev
->rx_pkt_burst
= &bnxt_recv_pkts
;
3218 eth_dev
->tx_pkt_burst
= &bnxt_xmit_pkts
;
3219 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
3222 if (pci_dev
->id
.device_id
!= BROADCOM_DEV_ID_NS2
) {
3223 snprintf(mz_name
, RTE_MEMZONE_NAMESIZE
,
3224 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev
->addr
.domain
,
3225 pci_dev
->addr
.bus
, pci_dev
->addr
.devid
,
3226 pci_dev
->addr
.function
, "rx_port_stats");
3227 mz_name
[RTE_MEMZONE_NAMESIZE
- 1] = 0;
3228 mz
= rte_memzone_lookup(mz_name
);
3229 total_alloc_len
= RTE_CACHE_LINE_ROUNDUP(
3230 sizeof(struct rx_port_stats
) +
3231 sizeof(struct rx_port_stats_ext
) +
3234 mz
= rte_memzone_reserve(mz_name
, total_alloc_len
,
3237 RTE_MEMZONE_SIZE_HINT_ONLY
|
3238 RTE_MEMZONE_IOVA_CONTIG
);
3242 memset(mz
->addr
, 0, mz
->len
);
3243 mz_phys_addr
= mz
->iova
;
3244 if ((unsigned long)mz
->addr
== mz_phys_addr
) {
3246 "Memzone physical address same as virtual using rte_mem_virt2iova()\n");
3247 mz_phys_addr
= rte_mem_virt2iova(mz
->addr
);
3248 if (mz_phys_addr
== 0) {
3250 "unable to map address to physical memory\n");
3255 bp
->rx_mem_zone
= (const void *)mz
;
3256 bp
->hw_rx_port_stats
= mz
->addr
;
3257 bp
->hw_rx_port_stats_map
= mz_phys_addr
;
3259 snprintf(mz_name
, RTE_MEMZONE_NAMESIZE
,
3260 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev
->addr
.domain
,
3261 pci_dev
->addr
.bus
, pci_dev
->addr
.devid
,
3262 pci_dev
->addr
.function
, "tx_port_stats");
3263 mz_name
[RTE_MEMZONE_NAMESIZE
- 1] = 0;
3264 mz
= rte_memzone_lookup(mz_name
);
3265 total_alloc_len
= RTE_CACHE_LINE_ROUNDUP(
3266 sizeof(struct tx_port_stats
) +
3267 sizeof(struct tx_port_stats_ext
) +
3270 mz
= rte_memzone_reserve(mz_name
,
3274 RTE_MEMZONE_SIZE_HINT_ONLY
|
3275 RTE_MEMZONE_IOVA_CONTIG
);
3279 memset(mz
->addr
, 0, mz
->len
);
3280 mz_phys_addr
= mz
->iova
;
3281 if ((unsigned long)mz
->addr
== mz_phys_addr
) {
3282 PMD_DRV_LOG(WARNING
,
3283 "Memzone physical address same as virtual.\n");
3284 PMD_DRV_LOG(WARNING
,
3285 "Using rte_mem_virt2iova()\n");
3286 mz_phys_addr
= rte_mem_virt2iova(mz
->addr
);
3287 if (mz_phys_addr
== 0) {
3289 "unable to map address to physical memory\n");
3294 bp
->tx_mem_zone
= (const void *)mz
;
3295 bp
->hw_tx_port_stats
= mz
->addr
;
3296 bp
->hw_tx_port_stats_map
= mz_phys_addr
;
3298 bp
->flags
|= BNXT_FLAG_PORT_STATS
;
3300 /* Display extended statistics if FW supports it */
3301 if (bp
->hwrm_spec_code
< HWRM_SPEC_CODE_1_8_4
||
3302 bp
->hwrm_spec_code
== HWRM_SPEC_CODE_1_9_0
)
3303 goto skip_ext_stats
;
3305 bp
->hw_rx_port_stats_ext
= (void *)
3306 (bp
->hw_rx_port_stats
+ sizeof(struct rx_port_stats
));
3307 bp
->hw_rx_port_stats_ext_map
= bp
->hw_rx_port_stats_map
+
3308 sizeof(struct rx_port_stats
);
3309 bp
->flags
|= BNXT_FLAG_EXT_RX_PORT_STATS
;
3312 if (bp
->hwrm_spec_code
< HWRM_SPEC_CODE_1_9_2
) {
3313 bp
->hw_tx_port_stats_ext
= (void *)
3314 (bp
->hw_tx_port_stats
+ sizeof(struct tx_port_stats
));
3315 bp
->hw_tx_port_stats_ext_map
=
3316 bp
->hw_tx_port_stats_map
+
3317 sizeof(struct tx_port_stats
);
3318 bp
->flags
|= BNXT_FLAG_EXT_TX_PORT_STATS
;
3323 rc
= bnxt_alloc_hwrm_resources(bp
);
3326 "hwrm resource allocation failure rc: %x\n", rc
);
3329 rc
= bnxt_hwrm_ver_get(bp
);
3332 rc
= bnxt_hwrm_queue_qportcfg(bp
);
3334 PMD_DRV_LOG(ERR
, "hwrm queue qportcfg failed\n");
3338 rc
= bnxt_hwrm_func_qcfg(bp
);
3340 PMD_DRV_LOG(ERR
, "hwrm func qcfg failed\n");
3344 /* Get the MAX capabilities for this function */
3345 rc
= bnxt_hwrm_func_qcaps(bp
);
3347 PMD_DRV_LOG(ERR
, "hwrm query capability failure rc: %x\n", rc
);
3350 if (bp
->max_tx_rings
== 0) {
3351 PMD_DRV_LOG(ERR
, "No TX rings available!\n");
3355 eth_dev
->data
->mac_addrs
= rte_zmalloc("bnxt_mac_addr_tbl",
3356 ETHER_ADDR_LEN
* bp
->max_l2_ctx
, 0);
3357 if (eth_dev
->data
->mac_addrs
== NULL
) {
3359 "Failed to alloc %u bytes needed to store MAC addr tbl",
3360 ETHER_ADDR_LEN
* bp
->max_l2_ctx
);
3365 if (bnxt_check_zero_bytes(bp
->dflt_mac_addr
, ETHER_ADDR_LEN
)) {
3367 "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
3368 bp
->dflt_mac_addr
[0], bp
->dflt_mac_addr
[1],
3369 bp
->dflt_mac_addr
[2], bp
->dflt_mac_addr
[3],
3370 bp
->dflt_mac_addr
[4], bp
->dflt_mac_addr
[5]);
3374 /* Copy the permanent MAC from the qcap response address now. */
3375 memcpy(bp
->mac_addr
, bp
->dflt_mac_addr
, sizeof(bp
->mac_addr
));
3376 memcpy(ð_dev
->data
->mac_addrs
[0], bp
->mac_addr
, ETHER_ADDR_LEN
);
3378 if (bp
->max_ring_grps
< bp
->rx_cp_nr_rings
) {
3379 /* 1 ring is for default completion ring */
3380 PMD_DRV_LOG(ERR
, "Insufficient resource: Ring Group\n");
3385 bp
->grp_info
= rte_zmalloc("bnxt_grp_info",
3386 sizeof(*bp
->grp_info
) * bp
->max_ring_grps
, 0);
3387 if (!bp
->grp_info
) {
3389 "Failed to alloc %zu bytes to store group info table\n",
3390 sizeof(*bp
->grp_info
) * bp
->max_ring_grps
);
3395 /* Forward all requests if firmware is new enough */
3396 if (((bp
->fw_ver
>= ((20 << 24) | (6 << 16) | (100 << 8))) &&
3397 (bp
->fw_ver
< ((20 << 24) | (7 << 16)))) ||
3398 ((bp
->fw_ver
>= ((20 << 24) | (8 << 16))))) {
3399 memset(bp
->pf
.vf_req_fwd
, 0xff, sizeof(bp
->pf
.vf_req_fwd
));
3401 PMD_DRV_LOG(WARNING
,
3402 "Firmware too old for VF mailbox functionality\n");
3403 memset(bp
->pf
.vf_req_fwd
, 0, sizeof(bp
->pf
.vf_req_fwd
));
3407 * The following are used for driver cleanup. If we disallow these,
3408 * VF drivers can't clean up cleanly.
3410 ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR
);
3411 ALLOW_FUNC(HWRM_VNIC_FREE
);
3412 ALLOW_FUNC(HWRM_RING_FREE
);
3413 ALLOW_FUNC(HWRM_RING_GRP_FREE
);
3414 ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE
);
3415 ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE
);
3416 ALLOW_FUNC(HWRM_STAT_CTX_FREE
);
3417 ALLOW_FUNC(HWRM_PORT_PHY_QCFG
);
3418 ALLOW_FUNC(HWRM_VNIC_TPA_CFG
);
3419 rc
= bnxt_hwrm_func_driver_register(bp
);
3422 "Failed to register driver");
3428 DRV_MODULE_NAME
" found at mem %" PRIx64
", node addr %pM\n",
3429 pci_dev
->mem_resource
[0].phys_addr
,
3430 pci_dev
->mem_resource
[0].addr
);
3432 rc
= bnxt_hwrm_func_reset(bp
);
3434 PMD_DRV_LOG(ERR
, "hwrm chip reset failure rc: %x\n", rc
);
3440 //if (bp->pf.active_vfs) {
3441 // TODO: Deallocate VF resources?
3443 if (bp
->pdev
->max_vfs
) {
3444 rc
= bnxt_hwrm_allocate_vfs(bp
, bp
->pdev
->max_vfs
);
3446 PMD_DRV_LOG(ERR
, "Failed to allocate VFs\n");
3450 rc
= bnxt_hwrm_allocate_pf_only(bp
);
3453 "Failed to allocate PF resources\n");
3459 bnxt_hwrm_port_led_qcaps(bp
);
3461 rc
= bnxt_setup_int(bp
);
3465 rc
= bnxt_alloc_mem(bp
);
3467 goto error_free_int
;
3469 rc
= bnxt_request_int(bp
);
3471 goto error_free_int
;
3473 bnxt_enable_int(bp
);
3479 bnxt_disable_int(bp
);
3480 bnxt_hwrm_func_buf_unrgtr(bp
);
3484 bnxt_dev_uninit(eth_dev
);
3490 bnxt_dev_uninit(struct rte_eth_dev
*eth_dev
)
3492 struct bnxt
*bp
= eth_dev
->data
->dev_private
;
3495 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
3498 PMD_DRV_LOG(DEBUG
, "Calling Device uninit\n");
3499 bnxt_disable_int(bp
);
3502 if (bp
->grp_info
!= NULL
) {
3503 rte_free(bp
->grp_info
);
3504 bp
->grp_info
= NULL
;
3506 rc
= bnxt_hwrm_func_driver_unregister(bp
, 0);
3507 bnxt_free_hwrm_resources(bp
);
3509 if (bp
->tx_mem_zone
) {
3510 rte_memzone_free((const struct rte_memzone
*)bp
->tx_mem_zone
);
3511 bp
->tx_mem_zone
= NULL
;
3514 if (bp
->rx_mem_zone
) {
3515 rte_memzone_free((const struct rte_memzone
*)bp
->rx_mem_zone
);
3516 bp
->rx_mem_zone
= NULL
;
3519 if (bp
->dev_stopped
== 0)
3520 bnxt_dev_close_op(eth_dev
);
3522 rte_free(bp
->pf
.vf_info
);
3523 eth_dev
->dev_ops
= NULL
;
3524 eth_dev
->rx_pkt_burst
= NULL
;
3525 eth_dev
->tx_pkt_burst
= NULL
;
3530 static int bnxt_pci_probe(struct rte_pci_driver
*pci_drv __rte_unused
,
3531 struct rte_pci_device
*pci_dev
)
3533 return rte_eth_dev_pci_generic_probe(pci_dev
, sizeof(struct bnxt
),
3537 static int bnxt_pci_remove(struct rte_pci_device
*pci_dev
)
3539 if (rte_eal_process_type() == RTE_PROC_PRIMARY
)
3540 return rte_eth_dev_pci_generic_remove(pci_dev
,
3543 return rte_eth_dev_pci_generic_remove(pci_dev
, NULL
);
3546 static struct rte_pci_driver bnxt_rte_pmd
= {
3547 .id_table
= bnxt_pci_id_map
,
3548 .drv_flags
= RTE_PCI_DRV_NEED_MAPPING
|
3549 RTE_PCI_DRV_INTR_LSC
| RTE_PCI_DRV_IOVA_AS_VA
,
3550 .probe
= bnxt_pci_probe
,
3551 .remove
= bnxt_pci_remove
,
3555 is_device_supported(struct rte_eth_dev
*dev
, struct rte_pci_driver
*drv
)
3557 if (strcmp(dev
->device
->driver
->name
, drv
->driver
.name
))
3563 bool is_bnxt_supported(struct rte_eth_dev
*dev
)
3565 return is_device_supported(dev
, &bnxt_rte_pmd
);
3568 RTE_INIT(bnxt_init_log
)
3570 bnxt_logtype_driver
= rte_log_register("pmd.net.bnxt.driver");
3571 if (bnxt_logtype_driver
>= 0)
3572 rte_log_set_level(bnxt_logtype_driver
, RTE_LOG_NOTICE
);
3575 RTE_PMD_REGISTER_PCI(net_bnxt
, bnxt_rte_pmd
);
3576 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt
, bnxt_pci_id_map
);
3577 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt
, "* igb_uio | uio_pci_generic | vfio-pci");