/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT	6000000
#define HWRM_SPEC_CODE_1_8_3	0x10803
#define HWRM_VERSION_1_9_1	0x10901
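
/*
 * Note: HWRM_CMD_TIMEOUT bounds the completion-poll loop in
 * bnxt_hwrm_send_message() below, which delays roughly one microsecond
 * per iteration, so 6000000 corresponds to a timeout on the order of
 * six seconds.
 */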
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}
static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */
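
/*
 * Illustrative call pattern (a sketch, not part of the driver): a caller
 * treats a negative return as a transport/timeout failure and a positive
 * value as an HWRM error code reported by the firmware, e.g.
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		PMD_DRV_LOG(ERR, "HWRM channel failure\n");
 *	else if (rc > 0)
 *		PMD_DRV_LOG(ERR, "HWRM command rejected: %d\n", rc);
 */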
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len, bool use_kong_mb)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };
	uint16_t bar_offset = use_kong_mb ?
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(1);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			    req->req_type);
		return -1;
	}

	return 0;
}
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() returns errors on failure: it releases the spinlock
 * and returns from the enclosing function only on the error path, so on
 * success the spinlock remains held. If the regular int return codes are
 * not used by the function, HWRM_CHECK_RESULT() should not be used
 * directly; rather, it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
#define HWRM_CHECK_RESULT_SILENT() do {\
	if (rc) { \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)
#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
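
/*
 * Typical usage of the three macros above, as followed by the
 * bnxt_hwrm_*() wrappers in this file (sketch only):
 *
 *	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 *	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	(may return, releasing the spinlock)
 *	... read fields from resp while the lock is still held ...
 *	HWRM_UNLOCK();
 *	return rc;
 */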
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher.
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}

	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);
	req.flags |=
	rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.max_vfs)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					      bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return 0;
		}
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));

		/*
		 * PF can sniff HWRM API issued by VF. This can be set up by
		 * linux driver and inherited by the DPDK PF driver. Clear
		 * this HWRM sniffer list in FW because DPDK PF driver does
		 * not support this.
		 */
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
		return 0;

	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
	int rc;
	uint32_t flags = 0;
	uint32_t enables;
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32
			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      bp->tx_nr_rings);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.enables |= rte_cpu_to_le_32(enables);
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
	}

	if (test)
		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

	req.flags = rte_cpu_to_le_32(flags);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (test)
		HWRM_CHECK_RESULT_SILENT();
	else
		HWRM_CHECK_RESULT();

	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);

	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		    resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		    resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		    HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
		    HWRM_VERSION_UPDATE);

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;
	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
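
/*
 * Illustrative expansion (not compiled): GET_QUEUE_INFO(0) pastes the
 * queue index into the response field names, i.e.
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */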
	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
			    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			    ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				    rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		    vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	/* Configure default VNIC only once. */
	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
	}
	if (vnic->vlan_strip)
		req.flags |=
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	/* Configure default VNIC again if necessary. */
	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;

	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
	req.hash_mode_flags = vnic->hash_mode;

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
		memset(rxr->rx_desc_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_desc_ring));
		memset(rxr->rx_buf_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_buf_ring));
		rxr->rx_prod = 0;
	}
	ring = rxr->ag_ring_struct;
	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		memset(rxr->ag_buf_ring, 0,
		       rxr->ag_ring_struct->ring_size *
		       sizeof(*rxr->ag_buf_ring));
		rxr->ag_prod = 0;
		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
	}
	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
		bnxt_free_cp_ring(bp, cpr);

	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++)
		bnxt_free_hwrm_rx_ring(bp, i);

	return 0;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
	}
	return rc;
}
static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	case ETH_LINK_SPEED_100G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	default:
		PMD_DRV_LOG(ERR,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
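
/*
 * Validate a link_speeds bitmap from dev_conf. When ETH_LINK_SPEED_FIXED
 * is set, exactly one speed bit may remain: (one_speed & (one_speed - 1))
 * is the usual power-of-two test, e.g. 0x10 & 0x0f == 0 (one bit set,
 * valid) while 0x18 & 0x17 == 0x10 (two bits set, rejected).
 */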
static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			PMD_DRV_LOG(ERR,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}
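
/*
 * Translate an ethdev link_speeds bitmap into the HWRM auto link speed
 * mask. For example, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G becomes
 * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB |
 * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB. Plain autoneg
 * advertises the PHY's reported support_speeds when the firmware has
 * provided them.
 */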
static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	if (link_speed & ETH_LINK_SPEED_100G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		/* FALLTHROUGH */
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
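
/*
 * Program the PHY from dev_conf->link_speeds. Autoneg restart is requested
 * only when the firmware has not already pinned a speed (auto_link_speed
 * and force_link_speed both unset); otherwise a single forced speed is
 * used, except on BaseT/TP media, which must autonegotiate.
 */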
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			      bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	flags = rte_le_to_cpu_16(resp->flags);
	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		/* FALLTHROUGH */
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->max_vfs = 0;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE *
				   BNXT_NUM_VLANS);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
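
/*
 * Build the per-VF FUNC_CFG request by dividing the PF's resource maxima
 * evenly across the PF and its VFs: each of the (num_vfs + 1) functions
 * is offered max_tx_rings / (num_vfs + 1) TX rings, and likewise for
 * completion rings, RX rings, L2 contexts, stat contexts and ring groups.
 * With, say, 64 TX rings and num_vfs == 3, each function is offered 16.
 */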
static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
				    BNXT_NUM_VLANS);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
				    BNXT_NUM_VLANS);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}

static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}
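
/*
 * After a VF has been configured, query what the firmware actually granted
 * it (FUNC_QCAPS on the VF's fid) and subtract those amounts from the PF's
 * running resource maxima, so later VFs and the final PF configuration only
 * claim what is still unallocated. On a query failure the request values
 * themselves are used as the best available estimate.
 */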
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

	HWRM_UNLOCK();
}

int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Check for zero MAC address */
	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
		HWRM_UNLOCK();
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
		HWRM_UNLOCK();
		return -1;
	}
	rc = rte_le_to_cpu_16(resp->vlan);

	HWRM_UNLOCK();

	return rc;
}

static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;
	size_t sz;
	int rc = 0;
	size_t req_buf_sz;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring.  This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();
			break;
		}

		HWRM_UNLOCK();

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We're using STD_TX_RING_MODE here though which will limit the TX
	 * rings.  This will allow QoS to function properly.  Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}

int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
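
/*
 * Tunnel destination ports: the firmware hands back a tunnel_dst_port_id
 * for each UDP port it is asked to recognize. The driver caches both that
 * firmware ID and the raw port (vxlan_port/geneve_port) so the free path
 * can later reference the right firmware object.
 */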
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				    uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				   uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
				    uint32_t flags)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
	uint32_t *flag = flagp;

	vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
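
/*
 * FUNC_BUF_RGTR describes the VF forward buffer to the firmware as a
 * single page covering active_vfs * HWRM_MAX_REQ_LEN bytes;
 * page_getenum() supplies the log2-style page-size encoding the request
 * expects.
 */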
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr0 =
		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr0 == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		fid = rte_cpu_to_le_16(0xffff);
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			  uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
{
	int rc;

	if (BNXT_PF(bp))
		rc = bnxt_hwrm_func_cfg_def_cp(bp);
	else
		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);

	return rc;
}

int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
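
/*
 * Per-queue stats are derived from a STAT_CTX_QUERY response: ipackets
 * and ibytes are the sums of the unicast, multicast and broadcast
 * counters, and q_errors accumulates rx_err, rx_drop and tx_err for the
 * same queue index.
 */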
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (rx) {
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc = 0;

	HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc = 0;

	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];

			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
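
/*
 * The NVM helpers below share one pattern for DMA-able scratch buffers:
 * rte_malloc() the exact payload size, rte_mem_lock_page() to pin it,
 * then rte_mem_virt2iova() to obtain the bus address handed to the
 * firmware. A zero IOVA means the mapping failed and the command is
 * abandoned.
 */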
int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	*entries = rte_le_to_cpu_32(resp->entries);
	*length = rte_le_to_cpu_32(resp->entry_length);

	return rc;
}

int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;
	uint8_t *buf;
	size_t buflen;
	rte_iova_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};
	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	rte_mem_lock_page(buf);
	if (buf == NULL)
		return -ENOMEM;
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);

	rte_free(buf);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
			     uint32_t offset, uint32_t length,
			     uint8_t *data)
{
	int rc;
	uint8_t *buf;
	rte_iova_t dma_handle;
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

	buf = rte_malloc("nvm_item", length, 0);
	rte_mem_lock_page(buf);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	req.dir_idx = rte_cpu_to_le_16(index);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(length);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	if (rc == 0)
		memcpy(data, buf, length);

	rte_free(buf);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	buf = rte_malloc("nvm_write", data_len, 0);
	rte_mem_lock_page(buf);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	rte_free(buf);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}
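
/*
 * bnxt_vf_vnic_count() above is the simplest consumer of the two helpers
 * that follow (the raw VNIC ID query and the query-and-config walker):
 * its vnic_cb just bumps a counter through cbdata and its hwrm_cb is a
 * stub, so the walk counts a VF's VNICs without reprogramming anything.
 */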
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	if (rc) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0)
		return num_vnic_ids;

	/* Retrieve VNIC, update bd_stall then update */

	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					 bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return rc;
}
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	PMD_DRV_LOG(ERR, "Clear EM filter\n");
	HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
				uint16_t dst_id,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	//if (enables &
	//    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	//	memcpy(req.dst_macaddr, filter->dst_macaddr,
	//	       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				  struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = UINT64_MAX;

	return 0;
}
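
/*
 * RSS redirection below fills each of the HW_HASH_INDEX_SIZE table slots
 * with a ring group ID, walking fw_grp_ids round-robin (fw_idx wraps at
 * rx_cp_nr_rings) and skipping entries still set to INVALID_HW_RING_ID.
 * With 4 RX rings, slots 0..3 get groups 0..3 and slot 4 wraps back to
 * group 0, assuming all groups are valid.
 */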
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (vnic->rss_table && vnic->hash_type) {
		/*
		 * Fill the RSS hash & redirection table with
		 * ring group ids for all VNICs
		 */
		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
			rss_idx++, fw_idx++) {
			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
				fw_idx %= bp->rx_cp_nr_rings;
				if (vnic->fw_grp_ids[fw_idx] !=
				    INVALID_HW_RING_ID)
					break;
				fw_idx++;
			}
			if (i == bp->rx_cp_nr_rings)
				return 0;
			vnic->rss_table[rss_idx] =
				vnic->fw_grp_ids[fw_idx];
		}
		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	}

	return 0;
}

static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	uint16_t flags;

	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);

	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);

	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);

	/* min timer set to 1/2 of interrupt timer */
	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);

	/* buf timer set to 1/4 of interrupt timer */
	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);

	req->cmpl_aggr_dma_tmr_during_int =
		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	req->flags = rte_cpu_to_le_16(flags);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
			    struct bnxt_coal *coal, uint16_t ring_id)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* Set ring coalesce parameters only for Stratus 100G NIC */
	if (!bnxt_stratus_device(bp))
		return 0;

	HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
	bnxt_hwrm_set_coal_params(coal, &req);
	req.ring_id = rte_cpu_to_le_16(ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_input req = {0};
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
		req.tx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
		req.tx_stat_size =
			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
	}
	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
		req.rx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
		req.rx_stat_size =
			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc) {
		bp->fw_rx_port_stats_ext_size = 0;
		bp->fw_tx_port_stats_ext_size = 0;
	} else {
		bp->fw_rx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->rx_stat_size);
		bp->fw_tx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->tx_stat_size);
	}

	HWRM_CHECK_RESULT();