/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_util.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4

#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57311 0x16ce
#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
#define BROADCOM_DEV_ID_57412 0x16d6
#define BROADCOM_DEV_ID_57414 0x16d7
#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
#define BROADCOM_DEV_ID_57412_MF 0x16de
#define BROADCOM_DEV_ID_57314 0x16df
#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
#define BROADCOM_DEV_ID_57417_SFP 0x16e2
#define BROADCOM_DEV_ID_57416_SFP 0x16e3
#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
#define BROADCOM_DEV_ID_58802_VF 0xd800
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_CKSUM | \
				     DEV_TX_OFFLOAD_UDP_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_TSO | \
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
				     DEV_TX_OFFLOAD_MULTI_SEGS)

#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
				     DEV_RX_OFFLOAD_VLAN_STRIP | \
				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_UDP_CKSUM | \
				     DEV_RX_OFFLOAD_TCP_CKSUM | \
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
				     DEV_RX_OFFLOAD_CRC_STRIP | \
				     DEV_RX_OFFLOAD_KEEP_CRC | \
				     DEV_RX_OFFLOAD_TCP_LRO)
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
/***********************/

/*
 * High level utility functions
 */

static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp);
	return rc;
}
static int bnxt_init_chip(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	if (bp->eth_dev->data->mtu > ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;

		vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
		if (!vnic->fw_grp_ids) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc %d bytes for group ids\n",
				    size);
			rc = -ENOMEM;
			goto err_out;
		}
		memset(vnic->fw_grp_ids, -1, size);

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
				    i, rc);
			goto err_out;
		}

		/* Alloc RSS context only if RSS mode is enabled */
		if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
			if (rc) {
				PMD_DRV_LOG(ERR,
					"HWRM vnic %d ctx alloc failure rc: %x\n",
					i, rc);
				goto err_out;
			}
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
				    i, rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM vnic %d filter failure rc: %x\n",
				i, rc);
			goto err_out;
		}

		for (j = 0; j < bp->rx_nr_rings; j++) {
			rxq = bp->eth_dev->data->rx_queues[j];

			if (rxq->rx_deferred_start)
				rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		}

		rc = bnxt_vnic_rss_configure(bp, vnic);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM vnic set RSS failure rc: %x\n", rc);
			goto err_out;
		}

		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

		if (bp->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
		else
			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", bp->eth_dev->data->nb_rx_queues);
			return -ENOMEM;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			 intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
	}

	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
	     queue_id++) {
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_out;
	}

	if (!bp->link_info.link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM link config failure rc: %x\n", rc);
			goto err_out;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp)
{
	int rc;

	rc = bnxt_init_ring_grps(bp);
	if (rc)
		return rc;

	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	return 0;
}
/*
 * Device configuration and status function
 */

static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				 struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = bp->pdev->max_vfs;
	max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = HW_HASH_INDEX_SIZE;
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE * 2;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;

found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			return -ENOSPC;
		}
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx ||
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
	    (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	     bp->max_vnics < eth_dev->data->nb_rx_queues)) {
		PMD_DRV_LOG(ERR,
			"Insufficient resources to support requested config\n");
		PMD_DRV_LOG(ERR,
			"Num Queues Requested: Tx %d, Rx %d\n",
			eth_dev->data->nb_tx_queues,
			eth_dev->data->nb_rx_queues);
		PMD_DRV_LOG(ERR,
			"MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
			bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
			bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
		return -ENOSPC;
	}

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;
}
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex\n"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
	bnxt_print_link_info(eth_dev);
	return 0;
}
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc;

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	bp->flags |= BNXT_FLAG_INIT_DONE;
	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	return rc;
}
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info.link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return 0;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info.link_up = 0;

	return 0;
}
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	bp->flags &= ~BNXT_FLAG_INIT_DONE;
	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_set_hwrm_link_config(bp, false);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_shutdown_nic(bp);
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->dev_stopped == 0)
		bnxt_dev_stop_op(eth_dev);

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}

	bnxt_dev_uninit(eth_dev);
}
710 static void bnxt_mac_addr_remove_op(struct rte_eth_dev
*eth_dev
,
713 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
714 uint64_t pool_mask
= eth_dev
->data
->mac_pool_sel
[index
];
715 struct bnxt_vnic_info
*vnic
;
716 struct bnxt_filter_info
*filter
, *temp_filter
;
717 uint32_t pool
= RTE_MIN(MAX_FF_POOLS
, ETH_64_POOLS
);
721 * Loop through all VNICs from the specified filter flow pools to
722 * remove the corresponding MAC addr filter
724 for (i
= 0; i
< pool
; i
++) {
725 if (!(pool_mask
& (1ULL << i
)))
728 STAILQ_FOREACH(vnic
, &bp
->ff_pool
[i
], next
) {
729 filter
= STAILQ_FIRST(&vnic
->filter
);
731 temp_filter
= STAILQ_NEXT(filter
, next
);
732 if (filter
->mac_index
== index
) {
733 STAILQ_REMOVE(&vnic
->filter
, filter
,
734 bnxt_filter_info
, next
);
735 bnxt_hwrm_clear_l2_filter(bp
, filter
);
736 filter
->mac_index
= INVALID_MAC_INDEX
;
737 memset(&filter
->l2_addr
, 0,
740 &bp
->free_filter_list
,
743 filter
= temp_filter
;
749 static int bnxt_mac_addr_add_op(struct rte_eth_dev
*eth_dev
,
750 struct ether_addr
*mac_addr
,
751 uint32_t index
, uint32_t pool
)
753 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
754 struct bnxt_vnic_info
*vnic
= STAILQ_FIRST(&bp
->ff_pool
[pool
]);
755 struct bnxt_filter_info
*filter
;
758 PMD_DRV_LOG(ERR
, "Cannot add MAC address to a VF interface\n");
763 PMD_DRV_LOG(ERR
, "VNIC not found for pool %d!\n", pool
);
766 /* Attach requested MAC address to the new l2_filter */
767 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
768 if (filter
->mac_index
== index
) {
770 "MAC addr already existed for pool %d\n", pool
);
774 filter
= bnxt_alloc_filter(bp
);
776 PMD_DRV_LOG(ERR
, "L2 filter alloc failed\n");
779 STAILQ_INSERT_TAIL(&vnic
->filter
, filter
, next
);
780 filter
->mac_index
= index
;
781 memcpy(filter
->l2_addr
, mac_addr
, ETHER_ADDR_LEN
);
782 return bnxt_hwrm_set_l2_filter(bp
, vnic
->fw_vnic_id
, filter
);
785 int bnxt_link_update_op(struct rte_eth_dev
*eth_dev
, int wait_to_complete
)
788 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
789 struct rte_eth_link
new;
790 unsigned int cnt
= BNXT_LINK_WAIT_CNT
;
792 memset(&new, 0, sizeof(new));
794 /* Retrieve link info from hardware */
795 rc
= bnxt_get_hwrm_link_config(bp
, &new);
797 new.link_speed
= ETH_LINK_SPEED_100M
;
798 new.link_duplex
= ETH_LINK_FULL_DUPLEX
;
800 "Failed to retrieve link rc = 0x%x!\n", rc
);
803 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL
);
805 if (!wait_to_complete
)
807 } while (!new.link_status
&& cnt
--);
810 /* Timed out or success */
811 if (new.link_status
!= eth_dev
->data
->dev_link
.link_status
||
812 new.link_speed
!= eth_dev
->data
->dev_link
.link_speed
) {
		memcpy(&eth_dev->data->dev_link, &new,
		       sizeof(struct rte_eth_link));
816 _rte_eth_dev_callback_process(eth_dev
,
817 RTE_ETH_EVENT_INTR_LSC
,
820 bnxt_print_link_info(eth_dev
);
826 static void bnxt_promiscuous_enable_op(struct rte_eth_dev
*eth_dev
)
828 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
829 struct bnxt_vnic_info
*vnic
;
831 if (bp
->vnic_info
== NULL
)
834 vnic
= &bp
->vnic_info
[0];
836 vnic
->flags
|= BNXT_VNIC_INFO_PROMISC
;
837 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
840 static void bnxt_promiscuous_disable_op(struct rte_eth_dev
*eth_dev
)
842 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
843 struct bnxt_vnic_info
*vnic
;
845 if (bp
->vnic_info
== NULL
)
848 vnic
= &bp
->vnic_info
[0];
850 vnic
->flags
&= ~BNXT_VNIC_INFO_PROMISC
;
851 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
854 static void bnxt_allmulticast_enable_op(struct rte_eth_dev
*eth_dev
)
856 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
857 struct bnxt_vnic_info
*vnic
;
859 if (bp
->vnic_info
== NULL
)
862 vnic
= &bp
->vnic_info
[0];
864 vnic
->flags
|= BNXT_VNIC_INFO_ALLMULTI
;
865 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
868 static void bnxt_allmulticast_disable_op(struct rte_eth_dev
*eth_dev
)
870 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
871 struct bnxt_vnic_info
*vnic
;
873 if (bp
->vnic_info
== NULL
)
876 vnic
= &bp
->vnic_info
[0];
878 vnic
->flags
&= ~BNXT_VNIC_INFO_ALLMULTI
;
879 bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
882 static int bnxt_reta_update_op(struct rte_eth_dev
*eth_dev
,
883 struct rte_eth_rss_reta_entry64
*reta_conf
,
886 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
887 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
888 struct bnxt_vnic_info
*vnic
;
891 if (!(dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_RSS_FLAG
))
894 if (reta_size
!= HW_HASH_INDEX_SIZE
) {
895 PMD_DRV_LOG(ERR
, "The configured hash table lookup size "
896 "(%d) must equal the size supported by the hardware "
897 "(%d)\n", reta_size
, HW_HASH_INDEX_SIZE
);
900 /* Update the RSS VNIC(s) */
901 for (i
= 0; i
< MAX_FF_POOLS
; i
++) {
902 STAILQ_FOREACH(vnic
, &bp
->ff_pool
[i
], next
) {
903 memcpy(vnic
->rss_table
, reta_conf
, reta_size
);
905 bnxt_hwrm_vnic_rss_cfg(bp
, vnic
);
911 static int bnxt_reta_query_op(struct rte_eth_dev
*eth_dev
,
912 struct rte_eth_rss_reta_entry64
*reta_conf
,
915 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
916 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
917 struct rte_intr_handle
*intr_handle
918 = &bp
->pdev
->intr_handle
;
920 /* Retrieve from the default VNIC */
923 if (!vnic
->rss_table
)
926 if (reta_size
!= HW_HASH_INDEX_SIZE
) {
927 PMD_DRV_LOG(ERR
, "The configured hash table lookup size "
928 "(%d) must equal the size supported by the hardware "
929 "(%d)\n", reta_size
, HW_HASH_INDEX_SIZE
);
932 /* EW - need to revisit here copying from uint64_t to uint16_t */
933 memcpy(reta_conf
, vnic
->rss_table
, reta_size
);
935 if (rte_intr_allow_others(intr_handle
)) {
936 if (eth_dev
->data
->dev_conf
.intr_conf
.lsc
!= 0)
937 bnxt_dev_lsc_intr_setup(eth_dev
);
943 static int bnxt_rss_hash_update_op(struct rte_eth_dev
*eth_dev
,
944 struct rte_eth_rss_conf
*rss_conf
)
946 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
947 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
948 struct bnxt_vnic_info
*vnic
;
949 uint16_t hash_type
= 0;
953 * If RSS enablement were different than dev_configure,
954 * then return -EINVAL
956 if (dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_RSS_FLAG
) {
957 if (!rss_conf
->rss_hf
)
958 PMD_DRV_LOG(ERR
, "Hash type NONE\n");
960 if (rss_conf
->rss_hf
& BNXT_ETH_RSS_SUPPORT
)
964 bp
->flags
|= BNXT_FLAG_UPDATE_HASH
;
965 memcpy(&bp
->rss_conf
, rss_conf
, sizeof(*rss_conf
));
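	/*
	 * Descriptive note: the rss_hf bits requested by the application are
	 * translated below into the HWRM VNIC RSS hash-type flags understood
	 * by the firmware.
	 */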
967 if (rss_conf
->rss_hf
& ETH_RSS_IPV4
)
968 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
;
969 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV4_TCP
)
970 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
;
971 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV4_UDP
)
972 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
;
973 if (rss_conf
->rss_hf
& ETH_RSS_IPV6
)
974 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
;
975 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV6_TCP
)
976 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
;
977 if (rss_conf
->rss_hf
& ETH_RSS_NONFRAG_IPV6_UDP
)
978 hash_type
|= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
;
980 /* Update the RSS VNIC(s) */
981 for (i
= 0; i
< MAX_FF_POOLS
; i
++) {
982 STAILQ_FOREACH(vnic
, &bp
->ff_pool
[i
], next
) {
983 vnic
->hash_type
= hash_type
;
986 * Use the supplied key if the key length is
987 * acceptable and the rss_key is not NULL
989 if (rss_conf
->rss_key
&&
990 rss_conf
->rss_key_len
<= HW_HASH_KEY_SIZE
)
991 memcpy(vnic
->rss_hash_key
, rss_conf
->rss_key
,
992 rss_conf
->rss_key_len
);
994 bnxt_hwrm_vnic_rss_cfg(bp
, vnic
);
1000 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev
*eth_dev
,
1001 struct rte_eth_rss_conf
*rss_conf
)
1003 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1004 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
1006 uint32_t hash_types
;
1008 /* RSS configuration is the same for all VNICs */
1009 if (vnic
&& vnic
->rss_hash_key
) {
1010 if (rss_conf
->rss_key
) {
1011 len
= rss_conf
->rss_key_len
<= HW_HASH_KEY_SIZE
?
1012 rss_conf
->rss_key_len
: HW_HASH_KEY_SIZE
;
1013 memcpy(rss_conf
->rss_key
, vnic
->rss_hash_key
, len
);
1016 hash_types
= vnic
->hash_type
;
1017 rss_conf
->rss_hf
= 0;
1018 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
) {
1019 rss_conf
->rss_hf
|= ETH_RSS_IPV4
;
1020 hash_types
&= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4
;
1022 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
) {
1023 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV4_TCP
;
1025 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4
;
1027 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
) {
1028 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV4_UDP
;
1030 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4
;
1032 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
) {
1033 rss_conf
->rss_hf
|= ETH_RSS_IPV6
;
1034 hash_types
&= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6
;
1036 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
) {
1037 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV6_TCP
;
1039 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6
;
1041 if (hash_types
& HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
) {
1042 rss_conf
->rss_hf
|= ETH_RSS_NONFRAG_IPV6_UDP
;
1044 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6
;
1048 "Unknwon RSS config from firmware (%08x), RSS disabled",
1053 rss_conf
->rss_hf
= 0;
1058 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev
*dev
,
1059 struct rte_eth_fc_conf
*fc_conf
)
1061 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1062 struct rte_eth_link link_info
;
1065 rc
= bnxt_get_hwrm_link_config(bp
, &link_info
);
1069 memset(fc_conf
, 0, sizeof(*fc_conf
));
1070 if (bp
->link_info
.auto_pause
)
1071 fc_conf
->autoneg
= 1;
1072 switch (bp
->link_info
.pause
) {
1074 fc_conf
->mode
= RTE_FC_NONE
;
1076 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX
:
1077 fc_conf
->mode
= RTE_FC_TX_PAUSE
;
1079 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX
:
1080 fc_conf
->mode
= RTE_FC_RX_PAUSE
;
1082 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX
|
1083 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX
):
1084 fc_conf
->mode
= RTE_FC_FULL
;
1090 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev
*dev
,
1091 struct rte_eth_fc_conf
*fc_conf
)
1093 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1095 if (!BNXT_SINGLE_PF(bp
) || BNXT_VF(bp
)) {
1096 PMD_DRV_LOG(ERR
, "Flow Control Settings cannot be modified\n");
1100 switch (fc_conf
->mode
) {
1102 bp
->link_info
.auto_pause
= 0;
1103 bp
->link_info
.force_pause
= 0;
1105 case RTE_FC_RX_PAUSE
:
1106 if (fc_conf
->autoneg
) {
1107 bp
->link_info
.auto_pause
=
1108 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX
;
1109 bp
->link_info
.force_pause
= 0;
1111 bp
->link_info
.auto_pause
= 0;
1112 bp
->link_info
.force_pause
=
1113 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX
;
1116 case RTE_FC_TX_PAUSE
:
1117 if (fc_conf
->autoneg
) {
1118 bp
->link_info
.auto_pause
=
1119 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX
;
1120 bp
->link_info
.force_pause
= 0;
1122 bp
->link_info
.auto_pause
= 0;
1123 bp
->link_info
.force_pause
=
1124 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX
;
1128 if (fc_conf
->autoneg
) {
1129 bp
->link_info
.auto_pause
=
1130 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX
|
1131 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX
;
1132 bp
->link_info
.force_pause
= 0;
1134 bp
->link_info
.auto_pause
= 0;
1135 bp
->link_info
.force_pause
=
1136 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX
|
1137 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX
;
1141 return bnxt_set_hwrm_link_config(bp
, true);
1144 /* Add UDP tunneling port */
1146 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev
*eth_dev
,
1147 struct rte_eth_udp_tunnel
*udp_tunnel
)
1149 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1150 uint16_t tunnel_type
= 0;
1153 switch (udp_tunnel
->prot_type
) {
1154 case RTE_TUNNEL_TYPE_VXLAN
:
1155 if (bp
->vxlan_port_cnt
) {
1156 PMD_DRV_LOG(ERR
, "Tunnel Port %d already programmed\n",
1157 udp_tunnel
->udp_port
);
1158 if (bp
->vxlan_port
!= udp_tunnel
->udp_port
) {
1159 PMD_DRV_LOG(ERR
, "Only one port allowed\n");
1162 bp
->vxlan_port_cnt
++;
1166 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
;
1167 bp
->vxlan_port_cnt
++;
1169 case RTE_TUNNEL_TYPE_GENEVE
:
1170 if (bp
->geneve_port_cnt
) {
1171 PMD_DRV_LOG(ERR
, "Tunnel Port %d already programmed\n",
1172 udp_tunnel
->udp_port
);
1173 if (bp
->geneve_port
!= udp_tunnel
->udp_port
) {
1174 PMD_DRV_LOG(ERR
, "Only one port allowed\n");
1177 bp
->geneve_port_cnt
++;
1181 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE
;
1182 bp
->geneve_port_cnt
++;
1185 PMD_DRV_LOG(ERR
, "Tunnel type is not supported\n");
1188 rc
= bnxt_hwrm_tunnel_dst_port_alloc(bp
, udp_tunnel
->udp_port
,
1194 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev
*eth_dev
,
1195 struct rte_eth_udp_tunnel
*udp_tunnel
)
1197 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1198 uint16_t tunnel_type
= 0;
1202 switch (udp_tunnel
->prot_type
) {
1203 case RTE_TUNNEL_TYPE_VXLAN
:
1204 if (!bp
->vxlan_port_cnt
) {
1205 PMD_DRV_LOG(ERR
, "No Tunnel port configured yet\n");
1208 if (bp
->vxlan_port
!= udp_tunnel
->udp_port
) {
1209 PMD_DRV_LOG(ERR
, "Req Port: %d. Configured port: %d\n",
1210 udp_tunnel
->udp_port
, bp
->vxlan_port
);
1213 if (--bp
->vxlan_port_cnt
)
1217 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN
;
1218 port
= bp
->vxlan_fw_dst_port_id
;
1220 case RTE_TUNNEL_TYPE_GENEVE
:
1221 if (!bp
->geneve_port_cnt
) {
1222 PMD_DRV_LOG(ERR
, "No Tunnel port configured yet\n");
1225 if (bp
->geneve_port
!= udp_tunnel
->udp_port
) {
1226 PMD_DRV_LOG(ERR
, "Req Port: %d. Configured port: %d\n",
1227 udp_tunnel
->udp_port
, bp
->geneve_port
);
1230 if (--bp
->geneve_port_cnt
)
1234 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE
;
1235 port
= bp
->geneve_fw_dst_port_id
;
1238 PMD_DRV_LOG(ERR
, "Tunnel type is not supported\n");
1242 rc
= bnxt_hwrm_tunnel_dst_port_free(bp
, port
, tunnel_type
);
1245 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN
)
1248 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE
)
1249 bp
->geneve_port
= 0;
1254 static int bnxt_del_vlan_filter(struct bnxt
*bp
, uint16_t vlan_id
)
1256 struct bnxt_filter_info
*filter
, *temp_filter
, *new_filter
;
1257 struct bnxt_vnic_info
*vnic
;
1260 uint32_t chk
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN
;
1262 /* Cycle through all VNICs */
1263 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1265 * For each VNIC and each associated filter(s)
1266 * if VLAN exists && VLAN matches vlan_id
1267 * remove the MAC+VLAN filter
1268 * add a new MAC only filter
1270 * VLAN filter doesn't exist, just skip and continue
1272 STAILQ_FOREACH(vnic
, &bp
->ff_pool
[i
], next
) {
1273 filter
= STAILQ_FIRST(&vnic
->filter
);
1275 temp_filter
= STAILQ_NEXT(filter
, next
);
1277 if (filter
->enables
& chk
&&
1278 filter
->l2_ovlan
== vlan_id
) {
1279 /* Must delete the filter */
1280 STAILQ_REMOVE(&vnic
->filter
, filter
,
1281 bnxt_filter_info
, next
);
1282 bnxt_hwrm_clear_l2_filter(bp
, filter
);
1284 &bp
->free_filter_list
,
1288 * Need to examine to see if the MAC
1289 * filter already existed or not before
1290 * allocating a new one
1293 new_filter
= bnxt_alloc_filter(bp
);
1296 "MAC/VLAN filter alloc failed\n");
1300 STAILQ_INSERT_TAIL(&vnic
->filter
,
1302 /* Inherit MAC from previous filter */
1303 new_filter
->mac_index
=
1305 memcpy(new_filter
->l2_addr
,
1306 filter
->l2_addr
, ETHER_ADDR_LEN
);
1307 /* MAC only filter */
1308 rc
= bnxt_hwrm_set_l2_filter(bp
,
1314 "Del Vlan filter for %d\n",
1317 filter
= temp_filter
;
1325 static int bnxt_add_vlan_filter(struct bnxt
*bp
, uint16_t vlan_id
)
1327 struct bnxt_filter_info
*filter
, *temp_filter
, *new_filter
;
1328 struct bnxt_vnic_info
*vnic
;
1331 uint32_t en
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
|
1332 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK
;
1333 uint32_t chk
= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
;
1335 /* Cycle through all VNICs */
1336 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1338 * For each VNIC and each associated filter(s)
1340 * if VLAN matches vlan_id
1341 * VLAN filter already exists, just skip and continue
1343 * add a new MAC+VLAN filter
1345 * Remove the old MAC only filter
1346 * Add a new MAC+VLAN filter
1348 STAILQ_FOREACH(vnic
, &bp
->ff_pool
[i
], next
) {
1349 filter
= STAILQ_FIRST(&vnic
->filter
);
1351 temp_filter
= STAILQ_NEXT(filter
, next
);
1353 if (filter
->enables
& chk
) {
1354 if (filter
->l2_ovlan
== vlan_id
)
1357 /* Must delete the MAC filter */
1358 STAILQ_REMOVE(&vnic
->filter
, filter
,
1359 bnxt_filter_info
, next
);
1360 bnxt_hwrm_clear_l2_filter(bp
, filter
);
1361 filter
->l2_ovlan
= 0;
1363 &bp
->free_filter_list
,
1366 new_filter
= bnxt_alloc_filter(bp
);
1369 "MAC/VLAN filter alloc failed\n");
1373 STAILQ_INSERT_TAIL(&vnic
->filter
, new_filter
,
1375 /* Inherit MAC from the previous filter */
1376 new_filter
->mac_index
= filter
->mac_index
;
1377 memcpy(new_filter
->l2_addr
, filter
->l2_addr
,
1379 /* MAC + VLAN ID filter */
1380 new_filter
->l2_ivlan
= vlan_id
;
1381 new_filter
->l2_ivlan_mask
= 0xF000;
1382 new_filter
->enables
|= en
;
1383 rc
= bnxt_hwrm_set_l2_filter(bp
,
1389 "Added Vlan filter for %d\n", vlan_id
);
1391 filter
= temp_filter
;
1399 static int bnxt_vlan_filter_set_op(struct rte_eth_dev
*eth_dev
,
1400 uint16_t vlan_id
, int on
)
1402 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1404 /* These operations apply to ALL existing MAC/VLAN filters */
1406 return bnxt_add_vlan_filter(bp
, vlan_id
);
1408 return bnxt_del_vlan_filter(bp
, vlan_id
);
1412 bnxt_vlan_offload_set_op(struct rte_eth_dev
*dev
, int mask
)
1414 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1415 uint64_t rx_offloads
= dev
->data
->dev_conf
.rxmode
.offloads
;
1418 if (mask
& ETH_VLAN_FILTER_MASK
) {
1419 if (!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_FILTER
)) {
1420 /* Remove any VLAN filters programmed */
1421 for (i
= 0; i
< 4095; i
++)
1422 bnxt_del_vlan_filter(bp
, i
);
1424 PMD_DRV_LOG(DEBUG
, "VLAN Filtering: %d\n",
1425 !!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_FILTER
));
1428 if (mask
& ETH_VLAN_STRIP_MASK
) {
1429 /* Enable or disable VLAN stripping */
1430 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1431 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
1432 if (rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
)
1433 vnic
->vlan_strip
= true;
1435 vnic
->vlan_strip
= false;
1436 bnxt_hwrm_vnic_cfg(bp
, vnic
);
1438 PMD_DRV_LOG(DEBUG
, "VLAN Strip Offload: %d\n",
1439 !!(rx_offloads
& DEV_RX_OFFLOAD_VLAN_STRIP
));
1442 if (mask
& ETH_VLAN_EXTEND_MASK
)
1443 PMD_DRV_LOG(ERR
, "Extend VLAN Not supported\n");
1449 bnxt_set_default_mac_addr_op(struct rte_eth_dev
*dev
, struct ether_addr
*addr
)
1451 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1452 /* Default Filter is tied to VNIC 0 */
1453 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
1454 struct bnxt_filter_info
*filter
;
1460 memcpy(bp
->mac_addr
, addr
, sizeof(bp
->mac_addr
));
1462 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
1463 /* Default Filter is at Index 0 */
1464 if (filter
->mac_index
!= 0)
1466 rc
= bnxt_hwrm_clear_l2_filter(bp
, filter
);
1469 memcpy(filter
->l2_addr
, bp
->mac_addr
, ETHER_ADDR_LEN
);
1470 memset(filter
->l2_addr_mask
, 0xff, ETHER_ADDR_LEN
);
1471 filter
->flags
|= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
;
1473 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
|
1474 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
;
1475 rc
= bnxt_hwrm_set_l2_filter(bp
, vnic
->fw_vnic_id
, filter
);
1478 filter
->mac_index
= 0;
1479 PMD_DRV_LOG(DEBUG
, "Set MAC addr\n");
1486 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev
*eth_dev
,
1487 struct ether_addr
*mc_addr_set
,
1488 uint32_t nb_mc_addr
)
1490 struct bnxt
*bp
= (struct bnxt
*)eth_dev
->data
->dev_private
;
1491 char *mc_addr_list
= (char *)mc_addr_set
;
1492 struct bnxt_vnic_info
*vnic
;
1493 uint32_t off
= 0, i
= 0;
1495 vnic
= &bp
->vnic_info
[0];
1497 if (nb_mc_addr
> BNXT_MAX_MC_ADDRS
) {
1498 vnic
->flags
|= BNXT_VNIC_INFO_ALLMULTI
;
1502 /* TODO Check for Duplicate mcast addresses */
1503 vnic
->flags
&= ~BNXT_VNIC_INFO_ALLMULTI
;
1504 for (i
= 0; i
< nb_mc_addr
; i
++) {
1505 memcpy(vnic
->mc_list
+ off
, &mc_addr_list
[i
], ETHER_ADDR_LEN
);
1506 off
+= ETHER_ADDR_LEN
;
1509 vnic
->mc_addr_cnt
= i
;
1512 return bnxt_hwrm_cfa_l2_set_rx_mask(bp
, vnic
, 0, NULL
);
1516 bnxt_fw_version_get(struct rte_eth_dev
*dev
, char *fw_version
, size_t fw_size
)
1518 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1519 uint8_t fw_major
= (bp
->fw_ver
>> 24) & 0xff;
1520 uint8_t fw_minor
= (bp
->fw_ver
>> 16) & 0xff;
1521 uint8_t fw_updt
= (bp
->fw_ver
>> 8) & 0xff;
1524 ret
= snprintf(fw_version
, fw_size
, "%d.%d.%d",
1525 fw_major
, fw_minor
, fw_updt
);
1527 ret
+= 1; /* add the size of '\0' */
1528 if (fw_size
< (uint32_t)ret
)
1535 bnxt_rxq_info_get_op(struct rte_eth_dev
*dev
, uint16_t queue_id
,
1536 struct rte_eth_rxq_info
*qinfo
)
1538 struct bnxt_rx_queue
*rxq
;
1540 rxq
= dev
->data
->rx_queues
[queue_id
];
1542 qinfo
->mp
= rxq
->mb_pool
;
1543 qinfo
->scattered_rx
= dev
->data
->scattered_rx
;
1544 qinfo
->nb_desc
= rxq
->nb_rx_desc
;
1546 qinfo
->conf
.rx_free_thresh
= rxq
->rx_free_thresh
;
1547 qinfo
->conf
.rx_drop_en
= 0;
1548 qinfo
->conf
.rx_deferred_start
= 0;
1552 bnxt_txq_info_get_op(struct rte_eth_dev
*dev
, uint16_t queue_id
,
1553 struct rte_eth_txq_info
*qinfo
)
1555 struct bnxt_tx_queue
*txq
;
1557 txq
= dev
->data
->tx_queues
[queue_id
];
1559 qinfo
->nb_desc
= txq
->nb_tx_desc
;
1561 qinfo
->conf
.tx_thresh
.pthresh
= txq
->pthresh
;
1562 qinfo
->conf
.tx_thresh
.hthresh
= txq
->hthresh
;
1563 qinfo
->conf
.tx_thresh
.wthresh
= txq
->wthresh
;
1565 qinfo
->conf
.tx_free_thresh
= txq
->tx_free_thresh
;
1566 qinfo
->conf
.tx_rs_thresh
= 0;
1567 qinfo
->conf
.tx_deferred_start
= txq
->tx_deferred_start
;
1570 static int bnxt_mtu_set_op(struct rte_eth_dev
*eth_dev
, uint16_t new_mtu
)
1572 struct bnxt
*bp
= eth_dev
->data
->dev_private
;
1573 struct rte_eth_dev_info dev_info
;
1574 uint32_t max_dev_mtu
;
1578 bnxt_dev_info_get_op(eth_dev
, &dev_info
);
1579 max_dev_mtu
= dev_info
.max_rx_pktlen
-
1580 ETHER_HDR_LEN
- ETHER_CRC_LEN
- VLAN_TAG_SIZE
* 2;
1582 if (new_mtu
< ETHER_MIN_MTU
|| new_mtu
> max_dev_mtu
) {
1583 PMD_DRV_LOG(ERR
, "MTU requested must be within (%d, %d)\n",
1584 ETHER_MIN_MTU
, max_dev_mtu
);
1589 if (new_mtu
> ETHER_MTU
) {
1590 bp
->flags
|= BNXT_FLAG_JUMBO
;
1591 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
|=
1592 DEV_RX_OFFLOAD_JUMBO_FRAME
;
1594 bp
->eth_dev
->data
->dev_conf
.rxmode
.offloads
&=
1595 ~DEV_RX_OFFLOAD_JUMBO_FRAME
;
1596 bp
->flags
&= ~BNXT_FLAG_JUMBO
;
1599 eth_dev
->data
->dev_conf
.rxmode
.max_rx_pkt_len
=
1600 new_mtu
+ ETHER_HDR_LEN
+ ETHER_CRC_LEN
+ VLAN_TAG_SIZE
* 2;
1602 eth_dev
->data
->mtu
= new_mtu
;
1603 PMD_DRV_LOG(INFO
, "New MTU is %d\n", eth_dev
->data
->mtu
);
1605 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1606 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
1609 vnic
->mru
= bp
->eth_dev
->data
->mtu
+ ETHER_HDR_LEN
+
1610 ETHER_CRC_LEN
+ VLAN_TAG_SIZE
* 2;
1611 rc
= bnxt_hwrm_vnic_cfg(bp
, vnic
);
1615 size
= rte_pktmbuf_data_room_size(bp
->rx_queues
[0]->mb_pool
);
1616 size
-= RTE_PKTMBUF_HEADROOM
;
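		/*
		 * Descriptive note (inferred from the check below): when a
		 * single mbuf's data room cannot hold the new MTU, the VNIC
		 * placement mode is reprogrammed via
		 * bnxt_hwrm_vnic_plcmode_cfg() so larger frames can still be
		 * received.
		 */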
1618 if (size
< new_mtu
) {
1619 rc
= bnxt_hwrm_vnic_plcmode_cfg(bp
, vnic
);
1629 bnxt_vlan_pvid_set_op(struct rte_eth_dev
*dev
, uint16_t pvid
, int on
)
1631 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1632 uint16_t vlan
= bp
->vlan
;
1635 if (!BNXT_SINGLE_PF(bp
) || BNXT_VF(bp
)) {
1637 "PVID cannot be modified for this function\n");
1640 bp
->vlan
= on
? pvid
: 0;
1642 rc
= bnxt_hwrm_set_default_vlan(bp
, 0, 0);
1649 bnxt_dev_led_on_op(struct rte_eth_dev
*dev
)
1651 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1653 return bnxt_hwrm_port_led_cfg(bp
, true);
1657 bnxt_dev_led_off_op(struct rte_eth_dev
*dev
)
1659 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1661 return bnxt_hwrm_port_led_cfg(bp
, false);
1665 bnxt_rx_queue_count_op(struct rte_eth_dev
*dev
, uint16_t rx_queue_id
)
1667 uint32_t desc
= 0, raw_cons
= 0, cons
;
1668 struct bnxt_cp_ring_info
*cpr
;
1669 struct bnxt_rx_queue
*rxq
;
1670 struct rx_pkt_cmpl
*rxcmp
;
1675 rxq
= dev
->data
->rx_queues
[rx_queue_id
];
1679 while (raw_cons
< rxq
->nb_rx_desc
) {
1680 cons
= RING_CMP(cpr
->cp_ring_struct
, raw_cons
);
1681 rxcmp
= (struct rx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1683 if (!CMPL_VALID(rxcmp
, valid
))
1685 valid
= FLIP_VALID(cons
, cpr
->cp_ring_struct
->ring_mask
, valid
);
1686 cmp_type
= CMP_TYPE(rxcmp
);
1687 if (cmp_type
== RX_TPA_END_CMPL_TYPE_RX_TPA_END
) {
1688 cmp
= (rte_le_to_cpu_32(
1689 ((struct rx_tpa_end_cmpl
*)
1690 (rxcmp
))->agg_bufs_v1
) &
1691 RX_TPA_END_CMPL_AGG_BUFS_MASK
) >>
1692 RX_TPA_END_CMPL_AGG_BUFS_SFT
;
1694 } else if (cmp_type
== 0x11) {
1696 cmp
= (rxcmp
->agg_bufs_v1
&
1697 RX_PKT_CMPL_AGG_BUFS_MASK
) >>
1698 RX_PKT_CMPL_AGG_BUFS_SFT
;
1703 raw_cons
+= cmp
? cmp
: 2;
1710 bnxt_rx_descriptor_status_op(void *rx_queue
, uint16_t offset
)
1712 struct bnxt_rx_queue
*rxq
= (struct bnxt_rx_queue
*)rx_queue
;
1713 struct bnxt_rx_ring_info
*rxr
;
1714 struct bnxt_cp_ring_info
*cpr
;
1715 struct bnxt_sw_rx_bd
*rx_buf
;
1716 struct rx_pkt_cmpl
*rxcmp
;
1717 uint32_t cons
, cp_cons
;
1725 if (offset
>= rxq
->nb_rx_desc
)
1728 cons
= RING_CMP(cpr
->cp_ring_struct
, offset
);
1729 cp_cons
= cpr
->cp_raw_cons
;
1730 rxcmp
= (struct rx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1732 if (cons
> cp_cons
) {
1733 if (CMPL_VALID(rxcmp
, cpr
->valid
))
1734 return RTE_ETH_RX_DESC_DONE
;
1736 if (CMPL_VALID(rxcmp
, !cpr
->valid
))
1737 return RTE_ETH_RX_DESC_DONE
;
1739 rx_buf
= &rxr
->rx_buf_ring
[cons
];
1740 if (rx_buf
->mbuf
== NULL
)
1741 return RTE_ETH_RX_DESC_UNAVAIL
;
1744 return RTE_ETH_RX_DESC_AVAIL
;
1748 bnxt_tx_descriptor_status_op(void *tx_queue
, uint16_t offset
)
1750 struct bnxt_tx_queue
*txq
= (struct bnxt_tx_queue
*)tx_queue
;
1751 struct bnxt_tx_ring_info
*txr
;
1752 struct bnxt_cp_ring_info
*cpr
;
1753 struct bnxt_sw_tx_bd
*tx_buf
;
1754 struct tx_pkt_cmpl
*txcmp
;
1755 uint32_t cons
, cp_cons
;
1763 if (offset
>= txq
->nb_tx_desc
)
1766 cons
= RING_CMP(cpr
->cp_ring_struct
, offset
);
1767 txcmp
= (struct tx_pkt_cmpl
*)&cpr
->cp_desc_ring
[cons
];
1768 cp_cons
= cpr
->cp_raw_cons
;
1770 if (cons
> cp_cons
) {
1771 if (CMPL_VALID(txcmp
, cpr
->valid
))
1772 return RTE_ETH_TX_DESC_UNAVAIL
;
1774 if (CMPL_VALID(txcmp
, !cpr
->valid
))
1775 return RTE_ETH_TX_DESC_UNAVAIL
;
1777 tx_buf
= &txr
->tx_buf_ring
[cons
];
1778 if (tx_buf
->mbuf
== NULL
)
1779 return RTE_ETH_TX_DESC_DONE
;
1781 return RTE_ETH_TX_DESC_FULL
;
1784 static struct bnxt_filter_info
*
1785 bnxt_match_and_validate_ether_filter(struct bnxt
*bp
,
1786 struct rte_eth_ethertype_filter
*efilter
,
1787 struct bnxt_vnic_info
*vnic0
,
1788 struct bnxt_vnic_info
*vnic
,
1791 struct bnxt_filter_info
*mfilter
= NULL
;
1795 if (efilter
->ether_type
== ETHER_TYPE_IPv4
||
1796 efilter
->ether_type
== ETHER_TYPE_IPv6
) {
1797 PMD_DRV_LOG(ERR
, "invalid ether_type(0x%04x) in"
1798 " ethertype filter.", efilter
->ether_type
);
1802 if (efilter
->queue
>= bp
->rx_nr_rings
) {
1803 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", efilter
->queue
);
1808 vnic0
= STAILQ_FIRST(&bp
->ff_pool
[0]);
1809 vnic
= STAILQ_FIRST(&bp
->ff_pool
[efilter
->queue
]);
1811 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", efilter
->queue
);
1816 if (efilter
->flags
& RTE_ETHTYPE_FLAGS_DROP
) {
1817 STAILQ_FOREACH(mfilter
, &vnic0
->filter
, next
) {
1818 if ((!memcmp(efilter
->mac_addr
.addr_bytes
,
1819 mfilter
->l2_addr
, ETHER_ADDR_LEN
) &&
1821 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP
&&
1822 mfilter
->ethertype
== efilter
->ether_type
)) {
1828 STAILQ_FOREACH(mfilter
, &vnic
->filter
, next
)
1829 if ((!memcmp(efilter
->mac_addr
.addr_bytes
,
1830 mfilter
->l2_addr
, ETHER_ADDR_LEN
) &&
1831 mfilter
->ethertype
== efilter
->ether_type
&&
1833 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
)) {
1847 bnxt_ethertype_filter(struct rte_eth_dev
*dev
,
1848 enum rte_filter_op filter_op
,
1851 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
1852 struct rte_eth_ethertype_filter
*efilter
=
1853 (struct rte_eth_ethertype_filter
*)arg
;
1854 struct bnxt_filter_info
*bfilter
, *filter1
;
1855 struct bnxt_vnic_info
*vnic
, *vnic0
;
1858 if (filter_op
== RTE_ETH_FILTER_NOP
)
1862 PMD_DRV_LOG(ERR
, "arg shouldn't be NULL for operation %u.",
1867 vnic0
= STAILQ_FIRST(&bp
->ff_pool
[0]);
1868 vnic
= STAILQ_FIRST(&bp
->ff_pool
[efilter
->queue
]);
1870 switch (filter_op
) {
1871 case RTE_ETH_FILTER_ADD
:
1872 bnxt_match_and_validate_ether_filter(bp
, efilter
,
1877 bfilter
= bnxt_get_unused_filter(bp
);
1878 if (bfilter
== NULL
) {
1880 "Not enough resources for a new filter.\n");
1883 bfilter
->filter_type
= HWRM_CFA_NTUPLE_FILTER
;
1884 memcpy(bfilter
->l2_addr
, efilter
->mac_addr
.addr_bytes
,
1886 memcpy(bfilter
->dst_macaddr
, efilter
->mac_addr
.addr_bytes
,
1888 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR
;
1889 bfilter
->ethertype
= efilter
->ether_type
;
1890 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
1892 filter1
= bnxt_get_l2_filter(bp
, bfilter
, vnic0
);
1893 if (filter1
== NULL
) {
1898 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
;
1899 bfilter
->fw_l2_filter_id
= filter1
->fw_l2_filter_id
;
1901 bfilter
->dst_id
= vnic
->fw_vnic_id
;
1903 if (efilter
->flags
& RTE_ETHTYPE_FLAGS_DROP
) {
1905 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP
;
1908 ret
= bnxt_hwrm_set_ntuple_filter(bp
, bfilter
->dst_id
, bfilter
);
1911 STAILQ_INSERT_TAIL(&vnic
->filter
, bfilter
, next
);
1913 case RTE_ETH_FILTER_DELETE
:
1914 filter1
= bnxt_match_and_validate_ether_filter(bp
, efilter
,
1916 if (ret
== -EEXIST
) {
1917 ret
= bnxt_hwrm_clear_ntuple_filter(bp
, filter1
);
1919 STAILQ_REMOVE(&vnic
->filter
, filter1
, bnxt_filter_info
,
1921 bnxt_free_filter(bp
, filter1
);
1922 } else if (ret
== 0) {
1923 PMD_DRV_LOG(ERR
, "No matching filter found\n");
1927 PMD_DRV_LOG(ERR
, "unsupported operation %u.", filter_op
);
1933 bnxt_free_filter(bp
, bfilter
);
1939 parse_ntuple_filter(struct bnxt
*bp
,
1940 struct rte_eth_ntuple_filter
*nfilter
,
1941 struct bnxt_filter_info
*bfilter
)
1945 if (nfilter
->queue
>= bp
->rx_nr_rings
) {
1946 PMD_DRV_LOG(ERR
, "Invalid queue %d\n", nfilter
->queue
);
1950 switch (nfilter
->dst_port_mask
) {
1952 bfilter
->dst_port_mask
= -1;
1953 bfilter
->dst_port
= nfilter
->dst_port
;
1954 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT
|
1955 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK
;
1958 PMD_DRV_LOG(ERR
, "invalid dst_port mask.");
1962 bfilter
->ip_addr_type
= NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
;
1963 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
1965 switch (nfilter
->proto_mask
) {
1967 if (nfilter
->proto
== 17) /* IPPROTO_UDP */
1968 bfilter
->ip_protocol
= 17;
1969 else if (nfilter
->proto
== 6) /* IPPROTO_TCP */
1970 bfilter
->ip_protocol
= 6;
1973 en
|= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO
;
1976 PMD_DRV_LOG(ERR
, "invalid protocol mask.");
1980 switch (nfilter
->dst_ip_mask
) {
1982 bfilter
->dst_ipaddr_mask
[0] = -1;
1983 bfilter
->dst_ipaddr
[0] = nfilter
->dst_ip
;
1984 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR
|
1985 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK
;
1988 PMD_DRV_LOG(ERR
, "invalid dst_ip mask.");
1992 switch (nfilter
->src_ip_mask
) {
1994 bfilter
->src_ipaddr_mask
[0] = -1;
1995 bfilter
->src_ipaddr
[0] = nfilter
->src_ip
;
1996 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR
|
1997 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK
;
2000 PMD_DRV_LOG(ERR
, "invalid src_ip mask.");
2004 switch (nfilter
->src_port_mask
) {
2006 bfilter
->src_port_mask
= -1;
2007 bfilter
->src_port
= nfilter
->src_port
;
2008 en
|= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT
|
2009 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK
;
2012 PMD_DRV_LOG(ERR
, "invalid src_port mask.");
2017 //nfilter->priority = (uint8_t)filter->priority;
2019 bfilter
->enables
= en
;
2023 static struct bnxt_filter_info
*
2024 bnxt_match_ntuple_filter(struct bnxt
*bp
,
2025 struct bnxt_filter_info
*bfilter
,
2026 struct bnxt_vnic_info
**mvnic
)
2028 struct bnxt_filter_info
*mfilter
= NULL
;
2031 for (i
= bp
->nr_vnics
- 1; i
>= 0; i
--) {
2032 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
2033 STAILQ_FOREACH(mfilter
, &vnic
->filter
, next
) {
2034 if (bfilter
->src_ipaddr
[0] == mfilter
->src_ipaddr
[0] &&
2035 bfilter
->src_ipaddr_mask
[0] ==
2036 mfilter
->src_ipaddr_mask
[0] &&
2037 bfilter
->src_port
== mfilter
->src_port
&&
2038 bfilter
->src_port_mask
== mfilter
->src_port_mask
&&
2039 bfilter
->dst_ipaddr
[0] == mfilter
->dst_ipaddr
[0] &&
2040 bfilter
->dst_ipaddr_mask
[0] ==
2041 mfilter
->dst_ipaddr_mask
[0] &&
2042 bfilter
->dst_port
== mfilter
->dst_port
&&
2043 bfilter
->dst_port_mask
== mfilter
->dst_port_mask
&&
2044 bfilter
->flags
== mfilter
->flags
&&
2045 bfilter
->enables
== mfilter
->enables
) {
2056 bnxt_cfg_ntuple_filter(struct bnxt
*bp
,
2057 struct rte_eth_ntuple_filter
*nfilter
,
2058 enum rte_filter_op filter_op
)
2060 struct bnxt_filter_info
*bfilter
, *mfilter
, *filter1
;
2061 struct bnxt_vnic_info
*vnic
, *vnic0
, *mvnic
;
2064 if (nfilter
->flags
!= RTE_5TUPLE_FLAGS
) {
2065 PMD_DRV_LOG(ERR
, "only 5tuple is supported.");
2069 if (nfilter
->flags
& RTE_NTUPLE_FLAGS_TCP_FLAG
) {
2070 PMD_DRV_LOG(ERR
, "Ntuple filter: TCP flags not supported\n");
2074 bfilter
= bnxt_get_unused_filter(bp
);
2075 if (bfilter
== NULL
) {
2077 "Not enough resources for a new filter.\n");
2080 ret
= parse_ntuple_filter(bp
, nfilter
, bfilter
);
2084 vnic
= STAILQ_FIRST(&bp
->ff_pool
[nfilter
->queue
]);
2085 vnic0
= STAILQ_FIRST(&bp
->ff_pool
[0]);
2086 filter1
= STAILQ_FIRST(&vnic0
->filter
);
2087 if (filter1
== NULL
) {
2092 bfilter
->dst_id
= vnic
->fw_vnic_id
;
2093 bfilter
->fw_l2_filter_id
= filter1
->fw_l2_filter_id
;
2095 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
;
2096 bfilter
->ethertype
= 0x800;
2097 bfilter
->enables
|= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE
;
2099 mfilter
= bnxt_match_ntuple_filter(bp
, bfilter
, &mvnic
);
2101 if (mfilter
!= NULL
&& filter_op
== RTE_ETH_FILTER_ADD
&&
2102 bfilter
->dst_id
== mfilter
->dst_id
) {
2103 PMD_DRV_LOG(ERR
, "filter exists.\n");
2106 } else if (mfilter
!= NULL
&& filter_op
== RTE_ETH_FILTER_ADD
&&
2107 bfilter
->dst_id
!= mfilter
->dst_id
) {
2108 mfilter
->dst_id
= vnic
->fw_vnic_id
;
2109 ret
= bnxt_hwrm_set_ntuple_filter(bp
, mfilter
->dst_id
, mfilter
);
2110 STAILQ_REMOVE(&mvnic
->filter
, mfilter
, bnxt_filter_info
, next
);
2111 STAILQ_INSERT_TAIL(&vnic
->filter
, mfilter
, next
);
2112 PMD_DRV_LOG(ERR
, "filter with matching pattern exists.\n");
2113 PMD_DRV_LOG(ERR
, " Updated it to the new destination queue\n");
2116 if (mfilter
== NULL
&& filter_op
== RTE_ETH_FILTER_DELETE
) {
2117 PMD_DRV_LOG(ERR
, "filter doesn't exist.");
2122 if (filter_op
== RTE_ETH_FILTER_ADD
) {
2123 bfilter
->filter_type
= HWRM_CFA_NTUPLE_FILTER
;
2124 ret
= bnxt_hwrm_set_ntuple_filter(bp
, bfilter
->dst_id
, bfilter
);
2127 STAILQ_INSERT_TAIL(&vnic
->filter
, bfilter
, next
);
2129 if (mfilter
== NULL
) {
2130 /* This should not happen. But for Coverity! */
2134 ret
= bnxt_hwrm_clear_ntuple_filter(bp
, mfilter
);
2136 STAILQ_REMOVE(&vnic
->filter
, mfilter
, bnxt_filter_info
, next
);
2137 bnxt_free_filter(bp
, mfilter
);
2138 mfilter
->fw_l2_filter_id
= -1;
2139 bnxt_free_filter(bp
, bfilter
);
2140 bfilter
->fw_l2_filter_id
= -1;
2145 bfilter
->fw_l2_filter_id
= -1;
2146 bnxt_free_filter(bp
, bfilter
);
2151 bnxt_ntuple_filter(struct rte_eth_dev
*dev
,
2152 enum rte_filter_op filter_op
,
2155 struct bnxt
*bp
= (struct bnxt
*)dev
->data
->dev_private
;
2158 if (filter_op
== RTE_ETH_FILTER_NOP
)
2162 PMD_DRV_LOG(ERR
, "arg shouldn't be NULL for operation %u.",
2167 switch (filter_op
) {
2168 case RTE_ETH_FILTER_ADD
:
2169 ret
= bnxt_cfg_ntuple_filter(bp
,
2170 (struct rte_eth_ntuple_filter
*)arg
,
2173 case RTE_ETH_FILTER_DELETE
:
2174 ret
= bnxt_cfg_ntuple_filter(bp
,
2175 (struct rte_eth_ntuple_filter
*)arg
,
2179 PMD_DRV_LOG(ERR
, "unsupported operation %u.", filter_op
);
static int
bnxt_parse_fdir_filter(struct bnxt *bp,
		       struct rte_eth_fdir_filter *fdir,
		       struct bnxt_filter_info *filter)
{
	enum rte_fdir_mode fdir_mode =
		bp->eth_dev->data->dev_conf.fdir_conf.mode;
	struct bnxt_vnic_info *vnic0, *vnic;
	struct bnxt_filter_info *filter1;
	uint32_t en = 0;
	int i;

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		return -EINVAL;

	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;

	switch (fdir->input.flow_type) {
	case RTE_ETH_FLOW_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		/* FALLTHROUGH */
		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 6;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		filter->src_port = fdir->input.flow.udp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 17;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		/* FALLTHROUGH */
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.ipv6_flow.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.ipv6_flow.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		filter->src_port = fdir->input.flow.udp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_VXLAN:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_NVGRE:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_UNKNOWN:
	case RTE_ETH_FLOW_RAW:
	case RTE_ETH_FLOW_FRAG_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_FRAG_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_IPV6_EX:
	case RTE_ETH_FLOW_IPV6_TCP_EX:
	case RTE_ETH_FLOW_IPV6_UDP_EX:
	case RTE_ETH_FLOW_GENEVE:
		/* FALLTHROUGH */
	default:
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
	if (vnic == NULL) {
		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			   fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}

	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		filter1 = STAILQ_FIRST(&vnic0->filter);
		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] == 0x00)
				filter1 = STAILQ_FIRST(&vnic0->filter);
			else
				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}

	if (filter1 == NULL)
		return -EINVAL;

	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;

	filter->enables = en;

	return 0;
}
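/*
 * Walk every vnic's filter list looking for an ntuple filter whose match
 * criteria are identical to 'nf'.  On a hit the owning vnic is returned
 * through 'mvnic' so the caller can migrate or reject the duplicate.
 */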
static struct bnxt_filter_info *
bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
		struct bnxt_vnic_info **mvnic)
{
	struct bnxt_filter_info *mf = NULL;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mvnic)
					*mvnic = vnic;
				return mf;
			}
		}
	}
	return NULL;
}
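/*
 * Flow director control handler.  An application reaches this through the
 * legacy filter API, for example (illustrative sketch only):
 *
 *	struct rte_eth_fdir_filter fdir = { ... };
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &fdir);
 *
 * ADD/DELETE parse the request, look for a duplicate and program or clear
 * the corresponding HWRM ntuple filter; FLUSH clears every ntuple filter.
 */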
static int
bnxt_fdir_filter(struct rte_eth_dev *dev,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
	struct bnxt_filter_info *filter, *match;
	struct bnxt_vnic_info *vnic, *mvnic;
	int ret = 0, i;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH */
		filter = bnxt_get_unused_filter(bp);
		if (filter == NULL) {
			PMD_DRV_LOG(ERR,
				    "Not enough resources for a new flow.\n");
			return -ENOMEM;
		}

		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
		if (ret != 0)
			goto free_filter;
		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;

		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
		else
			vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);

		match = bnxt_match_fdir(bp, filter, &mvnic);
		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
			if (match->dst_id == vnic->fw_vnic_id) {
				PMD_DRV_LOG(ERR, "Flow already exists.\n");
				ret = -EEXIST;
				goto free_filter;
			} else {
				match->dst_id = vnic->fw_vnic_id;
				ret = bnxt_hwrm_set_ntuple_filter(bp,
								  match->dst_id,
								  match);
				STAILQ_REMOVE(&mvnic->filter, match,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
				PMD_DRV_LOG(ERR,
					"Filter with matching pattern exist\n");
				PMD_DRV_LOG(ERR,
					"Updated it to new destination q\n");
				goto free_filter;
			}
		}
		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
			ret = -ENOENT;
			goto free_filter;
		}

		if (filter_op == RTE_ETH_FILTER_ADD) {
			ret = bnxt_hwrm_set_ntuple_filter(bp,
							  filter->dst_id,
							  filter);
			if (ret)
				goto free_filter;
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		} else {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
			STAILQ_REMOVE(&vnic->filter, match,
				      bnxt_filter_info, next);
			bnxt_free_filter(bp, match);
			filter->fw_l2_filter_id = -1;
			bnxt_free_filter(bp, filter);
		}
		break;
	case RTE_ETH_FILTER_FLUSH:
		for (i = bp->nr_vnics - 1; i >= 0; i--) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

			STAILQ_FOREACH(filter, &vnic->filter, next) {
				if (filter->filter_type ==
				    HWRM_CFA_NTUPLE_FILTER) {
					ret = bnxt_hwrm_clear_ntuple_filter(bp,
									filter);
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
				}
			}
		}
		return ret;
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_INFO:
		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;

free_filter:
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);
	return ret;
}
static int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		PMD_DRV_LOG(ERR,
			    "filter type: %d: To be implemented\n", filter_type);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = bnxt_fdir_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_NTUPLE:
		ret = bnxt_ntuple_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = bnxt_ethertype_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &bnxt_flow_ops;
		break;
	default:
		PMD_DRV_LOG(ERR,
			    "Filter type (%d) not supported", filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}
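/*
 * Report the packet types the receive path can parse.  The list is only
 * returned when the device is actually using bnxt_recv_pkts as its RX burst
 * function.
 */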
static const uint32_t *
bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == bnxt_recv_pkts)
		return ptypes;
	return NULL;
}
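/*
 * PTP register access: a GRC window register is programmed so that the
 * 4KB-aligned page holding the PTP registers becomes visible through BAR0.
 * bnxt_map_regs() checks that every register in 'reg_arr' lives on the same
 * 4KB page and then writes that page address into the requested window.
 */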
static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
			 int reg_win)
{
	uint32_t reg_base = *reg_arr & 0xfffff000;
	uint32_t win_off;
	int i;

	for (i = 0; i < count; i++) {
		if ((reg_arr[i] & 0xfffff000) != reg_base)
			return -ERANGE;
	}
	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
	rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
	return 0;
}

static int bnxt_map_ptp_regs(struct bnxt *bp)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t *reg_arr;
	int rc, i;

	reg_arr = ptp->rx_regs;
	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
	if (rc)
		return rc;

	reg_arr = ptp->tx_regs;
	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
	if (rc)
		return rc;

	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);

	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);

	return 0;
}

static void bnxt_unmap_ptp_regs(struct bnxt *bp)
{
	rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
	rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
}
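/*
 * The free-running PHC counter and the RX/TX timestamp FIFOs below are read
 * as two 32-bit halves and combined into a 64-bit value.
 */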
static uint64_t bnxt_cc_read(struct bnxt *bp)
{
	uint64_t ns;

	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			      BNXT_GRCPF_REG_SYNC_TIME));
	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
	return ns;
}

static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t fifo;

	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
		return -EAGAIN;

	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			       ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;

	return 0;
}

static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	struct bnxt_pf_info *pf = &bp->pf;
	uint16_t port_id;
	uint32_t fifo;

	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
		return -EAGAIN;

	port_id = pf->port_id;
	rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
			 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));

	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
		/* bnxt_clr_rx_ts(bp);  TBD */
		return -EBUSY;
	}

	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			       ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
			 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;

	return 0;
}
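/*
 * IEEE 1588 support is built on rte_timecounter: the hardware cycle values
 * read above are converted to nanoseconds with rte_timecounter_update(),
 * while write/adjust operations manipulate the timecounter nsec state
 * directly.
 */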
static int
bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (!ptp)
		return 0;

	ns = rte_timespec_to_ns(ts);
	/* Set the timecounters to a new value. */
	ptp->tc.nsec = ns;

	return 0;
}

static int
bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (!ptp)
		return 0;

	systime_cycles = bnxt_cc_read(bp);
	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}

static int
bnxt_timesync_enable(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t shift = 0;

	if (!ptp)
		return 0;

	ptp->rx_filter = 1;
	ptp->tx_tstamp_en = 1;
	ptp->rxctl = BNXT_PTP_MSG_EVENTS;

	if (!bnxt_hwrm_ptp_cfg(bp))
		bnxt_map_ptp_regs(bp);

	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
	ptp->tc.cc_shift = shift;
	ptp->tc.nsec_mask = (1ULL << shift) - 1;

	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
	ptp->rx_tstamp_tc.cc_shift = shift;
	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
	ptp->tx_tstamp_tc.cc_shift = shift;
	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	return 0;
}
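/*
 * Typical application usage of the timesync ops registered below
 * (illustrative sketch only):
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	...
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("RX timestamp %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */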
static int
bnxt_timesync_disable(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (!ptp)
		return 0;

	ptp->rx_filter = 0;
	ptp->tx_tstamp_en = 0;
	ptp->rxctl = 0;

	bnxt_hwrm_ptp_cfg(bp);

	bnxt_unmap_ptp_regs(bp);

	return 0;
}

static int
bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp,
				uint32_t flags __rte_unused)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint64_t rx_tstamp_cycles = 0;
	uint64_t ns;

	if (!ptp)
		return 0;

	bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);
	return 0;
}

static int
bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint64_t tx_tstamp_cycles = 0;
	uint64_t ns;

	if (!ptp)
		return 0;

	bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);
	return 0;
}

static int
bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (!ptp)
		return 0;

	ptp->tc.nsec += delta;

	return 0;
}
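/*
 * EEPROM access is proxied through HWRM: the "eeprom" exposed via the
 * rte_eth_dev_get_eeprom()/set_eeprom() API is the NVM directory, addressed
 * by directory index (upper byte of the offset) plus a byte offset within
 * the selected item.
 */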
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;

	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
		    bp->pdev->addr.domain, bp->pdev->addr.bus,
		    bp->pdev->addr.devid, bp->pdev->addr.function);

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	return dir_entries * entry_length;
}

static int
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint32_t index;
	uint32_t offset;

	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
		    "len = %d\n", bp->pdev->addr.domain,
		    bp->pdev->addr.bus, bp->pdev->addr.devid,
		    bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (in_eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(bp, in_eeprom->length,
						in_eeprom->data);

	index = in_eeprom->offset >> 24;
	offset = in_eeprom->offset & 0xffffff;

	if (index != 0)
		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
						in_eeprom->length,
						in_eeprom->data);
	return 0;
}

static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		/* FALLTHROUGH */
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		/* FALLTHROUGH */
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(uint16_t dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t index, dir_op;
	uint16_t type, ext, ordinal, attr;

	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
		    "len = %d\n", bp->pdev->addr.domain,
		    bp->pdev->addr.bus, bp->pdev->addr.devid,
		    bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
		return -EINVAL;
	}

	type = in_eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = in_eeprom->magic & 0xff;
		dir_op = in_eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (in_eeprom->offset != ~in_eeprom->magic)
				return -EINVAL;
			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EOPNOTSUPP;
	ext = in_eeprom->magic & 0xffff;
	ordinal = in_eeprom->offset >> 16;
	attr = in_eeprom->offset & 0xffff;

	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
				     in_eeprom->data, in_eeprom->length);
}
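/* Per-device operation table wired into ethdev for every bnxt port. */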
static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
	.rx_queue_count = bnxt_rx_queue_count_op,
	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
	.rx_queue_start = bnxt_rx_queue_start,
	.rx_queue_stop = bnxt_rx_queue_stop,
	.tx_queue_start = bnxt_tx_queue_start,
	.tx_queue_stop = bnxt_tx_queue_stop,
	.filter_ctrl = bnxt_filter_ctrl_op,
	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
	.get_eeprom_length = bnxt_get_eeprom_length_op,
	.get_eeprom = bnxt_get_eeprom_op,
	.set_eeprom = bnxt_set_eeprom_op,
	.timesync_enable = bnxt_timesync_enable,
	.timesync_disable = bnxt_timesync_disable,
	.timesync_read_time = bnxt_timesync_read_time,
	.timesync_write_time = bnxt_timesync_write_time,
	.timesync_adjust_time = bnxt_timesync_adjust_time,
	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
};
static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id == BROADCOM_DEV_ID_5741X_VF ||
	    id == BROADCOM_DEV_ID_57414_VF ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
	    id == BROADCOM_DEV_ID_58802_VF)
		return true;
	return false;
}

bool bnxt_stratus_device(struct bnxt *bp)
{
	uint16_t id = bp->pdev->id.device_id;

	if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
		return true;
	return false;
}
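/*
 * Map the PCI resources the driver needs: BAR0 holds the GRC/HWRM registers
 * and BAR2 the doorbells.  On failure the pointers are left cleared so that
 * device uninit can run safely.
 */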
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int rc;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!pci_dev->mem_resource[0].addr) {
		PMD_DRV_LOG(ERR,
			    "Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = pci_dev;

	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	if (!pci_dev->mem_resource[2].addr) {
		PMD_DRV_LOG(ERR,
			    "Cannot find PCI device BAR 2 address, aborting\n");
		rc = -ENODEV;
		goto init_err_release;
	}
	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;

	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;
	if (bp->doorbell_base)
		bp->doorbell_base = NULL;

init_err_disable:

	return rc;
}

#define ALLOW_FUNC(x) \
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
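/*
 * ALLOW_FUNC() clears the forwarding bit for one HWRM command type in
 * pf.vf_req_fwd (a little-endian bitmap indexed by request type: word
 * req_type >> 5, bit req_type & 0x1f), so VF-issued commands of that type
 * are passed straight to firmware instead of being trapped by the PF driver.
 */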
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	static int version_printed;
	uint32_t total_alloc_len;
	rte_iova_t mz_phys_addr;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "rx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				  sizeof(struct rx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			PMD_DRV_LOG(WARNING,
				"Memzone physical address same as virtual.\n");
			PMD_DRV_LOG(WARNING,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				PMD_DRV_LOG(ERR,
				"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->rx_mem_zone = (const void *)mz;
		bp->hw_rx_port_stats = mz->addr;
		bp->hw_rx_port_stats_map = mz_phys_addr;

		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "tx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				  sizeof(struct tx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name,
						 total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			PMD_DRV_LOG(WARNING,
				"Memzone physical address same as virtual.\n");
			PMD_DRV_LOG(WARNING,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				PMD_DRV_LOG(ERR,
				"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->tx_mem_zone = (const void *)mz;
		bp->hw_tx_port_stats = mz->addr;
		bp->hw_tx_port_stats_map = mz_phys_addr;

		bp->flags |= BNXT_FLAG_PORT_STATS;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
		goto error_free;
	}

	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
		goto error_free;
	}

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * bp->max_l2_ctx);
		rc = -ENOMEM;
		goto error_free;
	}

	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
		PMD_DRV_LOG(ERR,
			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
			    bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
			    bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
		rc = -EINVAL;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		rc = -ENOSPC;
		goto error_free;
	}

	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				   sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
	if (!bp->grp_info) {
		PMD_DRV_LOG(ERR,
			"Failed to alloc %zu bytes to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		PMD_DRV_LOG(WARNING,
			    "Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup. If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	PMD_DRV_LOG(INFO,
		    DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		    pci_dev->mem_resource[0].phys_addr,
		    pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
		rc = -EIO;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
		// TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}
static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);

	if (bp->tx_mem_zone) {
		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
		bp->tx_mem_zone = NULL;
	}

	if (bp->rx_mem_zone) {
		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
		bp->rx_mem_zone = NULL;
	}

	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	rte_free(bp->pf.vf_info);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}
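/*
 * PCI glue: probe/remove hand off to the generic ethdev PCI helpers, which
 * allocate the per-port private area (struct bnxt) and invoke
 * bnxt_dev_init()/bnxt_dev_uninit().
 */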
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
					     bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_INIT(bnxt_init_log)
{
	bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
	if (bnxt_logtype_driver >= 0)
		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");