1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
/* Firmware interface (HWRM spec) version thresholds, encoded as
 * (major << 16) | (minor << 8) | update. */
27 #define HWRM_SPEC_CODE_1_8_3 0x10803
28 #define HWRM_VERSION_1_9_1 0x10901
/* NOTE(review): 0x10903 encodes 1.9.3, but the macro is named 1_9_2 —
 * confirm whether the name or the value is intended before relying on it. */
29 #define HWRM_VERSION_1_9_2 0x10903
31 struct bnxt_plcmodes_cfg
{
33 uint16_t jumbo_thresh
;
35 uint16_t hds_threshold
;
38 static int page_getenum(size_t size
)
54 PMD_DRV_LOG(ERR
, "Page size %zu out of range\n", size
);
55 return sizeof(void *) * 8 - 1;
58 static int page_roundup(size_t size
)
60 return 1 << page_getenum(size
);
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info
*rmem
,
67 if (rmem
->nr_pages
> 1) {
69 *pg_dir
= rte_cpu_to_le_64(rmem
->pg_tbl_map
);
71 *pg_dir
= rte_cpu_to_le_64(rmem
->dma_arr
[0]);
76 * HWRM Functions (sent to HWRM)
77 * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
78 * HWRM command times out, or a negative error code if the HWRM
79 * command was failed by the FW.
82 static int bnxt_hwrm_send_message(struct bnxt
*bp
, void *msg
,
83 uint32_t msg_len
, bool use_kong_mb
)
86 struct input
*req
= msg
;
87 struct output
*resp
= bp
->hwrm_cmd_resp_addr
;
91 uint16_t max_req_len
= bp
->max_req_len
;
92 struct hwrm_short_input short_input
= { 0 };
93 uint16_t bar_offset
= use_kong_mb
?
94 GRCPF_REG_KONG_CHANNEL_OFFSET
: GRCPF_REG_CHIMP_CHANNEL_OFFSET
;
95 uint16_t mb_trigger_offset
= use_kong_mb
?
96 GRCPF_REG_KONG_COMM_TRIGGER
: GRCPF_REG_CHIMP_COMM_TRIGGER
;
99 /* Do not send HWRM commands to firmware in error state */
100 if (bp
->flags
& BNXT_FLAG_FATAL_ERROR
)
103 timeout
= bp
->hwrm_cmd_timeout
;
105 if (bp
->flags
& BNXT_FLAG_SHORT_CMD
||
106 msg_len
> bp
->max_req_len
) {
107 void *short_cmd_req
= bp
->hwrm_short_cmd_req_addr
;
109 memset(short_cmd_req
, 0, bp
->hwrm_max_ext_req_len
);
110 memcpy(short_cmd_req
, req
, msg_len
);
112 short_input
.req_type
= rte_cpu_to_le_16(req
->req_type
);
113 short_input
.signature
= rte_cpu_to_le_16(
114 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD
);
115 short_input
.size
= rte_cpu_to_le_16(msg_len
);
116 short_input
.req_addr
=
117 rte_cpu_to_le_64(bp
->hwrm_short_cmd_req_dma_addr
);
119 data
= (uint32_t *)&short_input
;
120 msg_len
= sizeof(short_input
);
122 max_req_len
= BNXT_HWRM_SHORT_REQ_LEN
;
125 /* Write request msg to hwrm channel */
126 for (i
= 0; i
< msg_len
; i
+= 4) {
127 bar
= (uint8_t *)bp
->bar0
+ bar_offset
+ i
;
128 rte_write32(*data
, bar
);
132 /* Zero the rest of the request space */
133 for (; i
< max_req_len
; i
+= 4) {
134 bar
= (uint8_t *)bp
->bar0
+ bar_offset
+ i
;
138 /* Ring channel doorbell */
139 bar
= (uint8_t *)bp
->bar0
+ mb_trigger_offset
;
142 * Make sure the channel doorbell ring command complete before
143 * reading the response to avoid getting stale or invalid
148 /* Poll for the valid bit */
149 for (i
= 0; i
< timeout
; i
++) {
150 /* Sanity check on the resp->resp_len */
152 if (resp
->resp_len
&& resp
->resp_len
<= bp
->max_resp_len
) {
153 /* Last byte of resp contains the valid key */
154 valid
= (uint8_t *)resp
+ resp
->resp_len
- 1;
155 if (*valid
== HWRM_RESP_VALID_KEY
)
162 /* Suppress VER_GET timeout messages during reset recovery */
163 if (bp
->flags
& BNXT_FLAG_FW_RESET
&&
164 rte_cpu_to_le_16(req
->req_type
) == HWRM_VER_GET
)
168 "Error(timeout) sending msg 0x%04x, seq_id %d\n",
169 req
->req_type
, req
->seq_id
);
176 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
177 * spinlock, and does initial processing.
179 * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
180 * releases the spinlock only if it returns. If the regular int return codes
181 * are not used by the function, HWRM_CHECK_RESULT() should not be used
182 * directly, rather it should be copied and modified to suit the function.
184 * HWRM_UNLOCK() must be called after all response processing is completed.
186 #define HWRM_PREP(req, type, kong) do { \
187 rte_spinlock_lock(&bp->hwrm_lock); \
188 if (bp->hwrm_cmd_resp_addr == NULL) { \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
192 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
193 (req)->req_type = rte_cpu_to_le_16(type); \
194 (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
195 (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
196 rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
197 (req)->target_id = rte_cpu_to_le_16(0xffff); \
198 (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
201 #define HWRM_CHECK_RESULT_SILENT() do {\
203 rte_spinlock_unlock(&bp->hwrm_lock); \
206 if (resp->error_code) { \
207 rc = rte_le_to_cpu_16(resp->error_code); \
208 rte_spinlock_unlock(&bp->hwrm_lock); \
213 #define HWRM_CHECK_RESULT() do {\
215 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
216 rte_spinlock_unlock(&bp->hwrm_lock); \
217 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
219 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
221 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
223 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
225 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
231 if (resp->error_code) { \
232 rc = rte_le_to_cpu_16(resp->error_code); \
233 if (resp->resp_len >= 16) { \
234 struct hwrm_err_output *tmp_hwrm_err_op = \
237 "error %d:%d:%08x:%04x\n", \
238 rc, tmp_hwrm_err_op->cmd_err, \
240 tmp_hwrm_err_op->opaque_0), \
242 tmp_hwrm_err_op->opaque_1)); \
244 PMD_DRV_LOG(ERR, "error %d\n", rc); \
246 rte_spinlock_unlock(&bp->hwrm_lock); \
247 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
249 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
251 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
253 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
255 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
/* Release the HWRM lock taken by HWRM_PREP(). Must be called only after
 * all response processing is complete: the shared response buffer at
 * bp->hwrm_cmd_resp_addr is reused (memset by the next HWRM_PREP) once
 * the lock is dropped. */
263 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
265 int bnxt_hwrm_tf_message_direct(struct bnxt
*bp
,
274 bool mailbox
= BNXT_USE_CHIMP_MB
;
275 struct input
*req
= msg
;
276 struct output
*resp
= bp
->hwrm_cmd_resp_addr
;
279 mailbox
= BNXT_USE_KONG(bp
);
281 HWRM_PREP(req
, msg_type
, mailbox
);
283 rc
= bnxt_hwrm_send_message(bp
, req
, msg_len
, mailbox
);
288 memcpy(resp_msg
, resp
, resp_len
);
295 int bnxt_hwrm_tf_message_tunneled(struct bnxt
*bp
,
299 uint32_t *tf_response_code
,
303 uint32_t response_len
)
306 struct hwrm_cfa_tflib_input req
= { .req_type
= 0 };
307 struct hwrm_cfa_tflib_output
*resp
= bp
->hwrm_cmd_resp_addr
;
308 bool mailbox
= BNXT_USE_CHIMP_MB
;
310 if (msg_len
> sizeof(req
.tf_req
))
314 mailbox
= BNXT_USE_KONG(bp
);
316 HWRM_PREP(&req
, HWRM_TF
, mailbox
);
317 /* Build request using the user supplied request payload.
318 * TLV request size is checked at build time against HWRM
319 * request max size, thus no checking required.
321 req
.tf_type
= tf_type
;
322 req
.tf_subtype
= tf_subtype
;
323 memcpy(req
.tf_req
, msg
, msg_len
);
325 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), mailbox
);
328 /* Copy the resp to user provided response buffer */
329 if (response
!= NULL
)
330 /* Post process response data. We need to copy only
331 * the 'payload' as the HWRM data structure really is
332 * HWRM header + msg header + payload and the TFLIB
333 * only provided a payload place holder.
335 if (response_len
!= 0) {
341 /* Extract the internal tflib response code */
342 *tf_response_code
= resp
->tf_resp_code
;
348 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
351 struct hwrm_cfa_l2_set_rx_mask_input req
= {.req_type
= 0 };
352 struct hwrm_cfa_l2_set_rx_mask_output
*resp
= bp
->hwrm_cmd_resp_addr
;
354 HWRM_PREP(&req
, HWRM_CFA_L2_SET_RX_MASK
, BNXT_USE_CHIMP_MB
);
355 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
358 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
366 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt
*bp
,
367 struct bnxt_vnic_info
*vnic
,
369 struct bnxt_vlan_table_entry
*vlan_table
)
372 struct hwrm_cfa_l2_set_rx_mask_input req
= {.req_type
= 0 };
373 struct hwrm_cfa_l2_set_rx_mask_output
*resp
= bp
->hwrm_cmd_resp_addr
;
376 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
)
379 HWRM_PREP(&req
, HWRM_CFA_L2_SET_RX_MASK
, BNXT_USE_CHIMP_MB
);
380 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
382 if (vnic
->flags
& BNXT_VNIC_INFO_BCAST
)
383 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
;
384 if (vnic
->flags
& BNXT_VNIC_INFO_UNTAGGED
)
385 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN
;
387 if (vnic
->flags
& BNXT_VNIC_INFO_PROMISC
)
388 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS
;
390 if (vnic
->flags
& BNXT_VNIC_INFO_ALLMULTI
) {
391 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST
;
392 } else if (vnic
->flags
& BNXT_VNIC_INFO_MCAST
) {
393 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
;
394 req
.num_mc_entries
= rte_cpu_to_le_32(vnic
->mc_addr_cnt
);
395 req
.mc_tbl_addr
= rte_cpu_to_le_64(vnic
->mc_list_dma_addr
);
398 if (!(mask
& HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN
))
399 mask
|= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY
;
400 req
.vlan_tag_tbl_addr
=
401 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table
));
402 req
.num_vlan_tags
= rte_cpu_to_le_32((uint32_t)vlan_count
);
404 req
.mask
= rte_cpu_to_le_32(mask
);
406 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
414 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt
*bp
, uint16_t fid
,
416 struct bnxt_vlan_antispoof_table_entry
*vlan_table
)
419 struct hwrm_cfa_vlan_antispoof_cfg_input req
= {.req_type
= 0 };
420 struct hwrm_cfa_vlan_antispoof_cfg_output
*resp
=
421 bp
->hwrm_cmd_resp_addr
;
424 * Older HWRM versions did not support this command, and the set_rx_mask
425 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
426 * removed from set_rx_mask call, and this command was added.
428 * This command is also present from 1.7.8.11 and higher,
431 if (bp
->fw_ver
< ((1 << 24) | (8 << 16))) {
432 if (bp
->fw_ver
!= ((1 << 24) | (7 << 16) | (8 << 8))) {
433 if (bp
->fw_ver
< ((1 << 24) | (7 << 16) | (8 << 8) |
438 HWRM_PREP(&req
, HWRM_CFA_VLAN_ANTISPOOF_CFG
, BNXT_USE_CHIMP_MB
);
439 req
.fid
= rte_cpu_to_le_16(fid
);
441 req
.vlan_tag_mask_tbl_addr
=
442 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table
));
443 req
.num_vlan_entries
= rte_cpu_to_le_32((uint32_t)vlan_count
);
445 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
453 int bnxt_hwrm_clear_l2_filter(struct bnxt
*bp
,
454 struct bnxt_filter_info
*filter
)
457 struct bnxt_filter_info
*l2_filter
= filter
;
458 struct bnxt_vnic_info
*vnic
= NULL
;
459 struct hwrm_cfa_l2_filter_free_input req
= {.req_type
= 0 };
460 struct hwrm_cfa_l2_filter_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
462 if (filter
->fw_l2_filter_id
== UINT64_MAX
)
465 if (filter
->matching_l2_fltr_ptr
)
466 l2_filter
= filter
->matching_l2_fltr_ptr
;
468 PMD_DRV_LOG(DEBUG
, "filter: %p l2_filter: %p ref_cnt: %d\n",
469 filter
, l2_filter
, l2_filter
->l2_ref_cnt
);
471 if (l2_filter
->l2_ref_cnt
== 0)
474 if (l2_filter
->l2_ref_cnt
> 0)
475 l2_filter
->l2_ref_cnt
--;
477 if (l2_filter
->l2_ref_cnt
> 0)
480 HWRM_PREP(&req
, HWRM_CFA_L2_FILTER_FREE
, BNXT_USE_CHIMP_MB
);
482 req
.l2_filter_id
= rte_cpu_to_le_64(filter
->fw_l2_filter_id
);
484 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
489 filter
->fw_l2_filter_id
= UINT64_MAX
;
490 if (l2_filter
->l2_ref_cnt
== 0) {
491 vnic
= l2_filter
->vnic
;
493 STAILQ_REMOVE(&vnic
->filter
, l2_filter
,
494 bnxt_filter_info
, next
);
495 bnxt_free_filter(bp
, l2_filter
);
502 int bnxt_hwrm_set_l2_filter(struct bnxt
*bp
,
504 struct bnxt_filter_info
*filter
)
507 struct hwrm_cfa_l2_filter_alloc_input req
= {.req_type
= 0 };
508 struct hwrm_cfa_l2_filter_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
509 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
510 const struct rte_eth_vmdq_rx_conf
*conf
=
511 &dev_conf
->rx_adv_conf
.vmdq_rx_conf
;
512 uint32_t enables
= 0;
513 uint16_t j
= dst_id
- 1;
515 //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
516 if ((dev_conf
->rxmode
.mq_mode
& ETH_MQ_RX_VMDQ_FLAG
) &&
517 conf
->pool_map
[j
].pools
& (1UL << j
)) {
519 "Add vlan %u to vmdq pool %u\n",
520 conf
->pool_map
[j
].vlan_id
, j
);
522 filter
->l2_ivlan
= conf
->pool_map
[j
].vlan_id
;
524 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
|
525 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK
;
528 if (filter
->fw_l2_filter_id
!= UINT64_MAX
)
529 bnxt_hwrm_clear_l2_filter(bp
, filter
);
531 HWRM_PREP(&req
, HWRM_CFA_L2_FILTER_ALLOC
, BNXT_USE_CHIMP_MB
);
533 req
.flags
= rte_cpu_to_le_32(filter
->flags
);
535 enables
= filter
->enables
|
536 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID
;
537 req
.dst_id
= rte_cpu_to_le_16(dst_id
);
540 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
)
541 memcpy(req
.l2_addr
, filter
->l2_addr
,
544 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
)
545 memcpy(req
.l2_addr_mask
, filter
->l2_addr_mask
,
548 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN
)
549 req
.l2_ovlan
= filter
->l2_ovlan
;
551 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN
)
552 req
.l2_ivlan
= filter
->l2_ivlan
;
554 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK
)
555 req
.l2_ovlan_mask
= filter
->l2_ovlan_mask
;
557 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK
)
558 req
.l2_ivlan_mask
= filter
->l2_ivlan_mask
;
559 if (enables
& HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID
)
560 req
.src_id
= rte_cpu_to_le_32(filter
->src_id
);
561 if (enables
& HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE
)
562 req
.src_type
= filter
->src_type
;
563 if (filter
->pri_hint
) {
564 req
.pri_hint
= filter
->pri_hint
;
565 req
.l2_filter_id_hint
=
566 rte_cpu_to_le_64(filter
->l2_filter_id_hint
);
569 req
.enables
= rte_cpu_to_le_32(enables
);
571 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
575 filter
->fw_l2_filter_id
= rte_le_to_cpu_64(resp
->l2_filter_id
);
576 filter
->flow_id
= rte_le_to_cpu_32(resp
->flow_id
);
579 filter
->l2_ref_cnt
++;
584 int bnxt_hwrm_ptp_cfg(struct bnxt
*bp
)
586 struct hwrm_port_mac_cfg_input req
= {.req_type
= 0};
587 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
594 HWRM_PREP(&req
, HWRM_PORT_MAC_CFG
, BNXT_USE_CHIMP_MB
);
597 flags
|= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE
;
600 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE
;
601 if (ptp
->tx_tstamp_en
)
602 flags
|= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE
;
605 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE
;
606 req
.flags
= rte_cpu_to_le_32(flags
);
607 req
.enables
= rte_cpu_to_le_32
608 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE
);
609 req
.rx_ts_capture_ptp_msg_type
= rte_cpu_to_le_16(ptp
->rxctl
);
611 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
617 static int bnxt_hwrm_ptp_qcfg(struct bnxt
*bp
)
620 struct hwrm_port_mac_ptp_qcfg_input req
= {.req_type
= 0};
621 struct hwrm_port_mac_ptp_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
622 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
627 HWRM_PREP(&req
, HWRM_PORT_MAC_PTP_QCFG
, BNXT_USE_CHIMP_MB
);
629 req
.port_id
= rte_cpu_to_le_16(bp
->pf
->port_id
);
631 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
635 if (!BNXT_CHIP_THOR(bp
) &&
636 !(resp
->flags
& HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS
))
639 if (resp
->flags
& HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS
)
640 bp
->flags
|= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS
;
642 ptp
= rte_zmalloc("ptp_cfg", sizeof(*ptp
), 0);
646 if (!BNXT_CHIP_THOR(bp
)) {
647 ptp
->rx_regs
[BNXT_PTP_RX_TS_L
] =
648 rte_le_to_cpu_32(resp
->rx_ts_reg_off_lower
);
649 ptp
->rx_regs
[BNXT_PTP_RX_TS_H
] =
650 rte_le_to_cpu_32(resp
->rx_ts_reg_off_upper
);
651 ptp
->rx_regs
[BNXT_PTP_RX_SEQ
] =
652 rte_le_to_cpu_32(resp
->rx_ts_reg_off_seq_id
);
653 ptp
->rx_regs
[BNXT_PTP_RX_FIFO
] =
654 rte_le_to_cpu_32(resp
->rx_ts_reg_off_fifo
);
655 ptp
->rx_regs
[BNXT_PTP_RX_FIFO_ADV
] =
656 rte_le_to_cpu_32(resp
->rx_ts_reg_off_fifo_adv
);
657 ptp
->tx_regs
[BNXT_PTP_TX_TS_L
] =
658 rte_le_to_cpu_32(resp
->tx_ts_reg_off_lower
);
659 ptp
->tx_regs
[BNXT_PTP_TX_TS_H
] =
660 rte_le_to_cpu_32(resp
->tx_ts_reg_off_upper
);
661 ptp
->tx_regs
[BNXT_PTP_TX_SEQ
] =
662 rte_le_to_cpu_32(resp
->tx_ts_reg_off_seq_id
);
663 ptp
->tx_regs
[BNXT_PTP_TX_FIFO
] =
664 rte_le_to_cpu_32(resp
->tx_ts_reg_off_fifo
);
673 static int __bnxt_hwrm_func_qcaps(struct bnxt
*bp
)
676 struct hwrm_func_qcaps_input req
= {.req_type
= 0 };
677 struct hwrm_func_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
678 uint16_t new_max_vfs
;
682 HWRM_PREP(&req
, HWRM_FUNC_QCAPS
, BNXT_USE_CHIMP_MB
);
684 req
.fid
= rte_cpu_to_le_16(0xffff);
686 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
690 bp
->max_ring_grps
= rte_le_to_cpu_32(resp
->max_hw_ring_grps
);
691 flags
= rte_le_to_cpu_32(resp
->flags
);
693 bp
->pf
->port_id
= resp
->port_id
;
694 bp
->pf
->first_vf_id
= rte_le_to_cpu_16(resp
->first_vf_id
);
695 bp
->pf
->total_vfs
= rte_le_to_cpu_16(resp
->max_vfs
);
696 new_max_vfs
= bp
->pdev
->max_vfs
;
697 if (new_max_vfs
!= bp
->pf
->max_vfs
) {
699 rte_free(bp
->pf
->vf_info
);
700 bp
->pf
->vf_info
= rte_malloc("bnxt_vf_info",
701 sizeof(bp
->pf
->vf_info
[0]) * new_max_vfs
, 0);
702 bp
->pf
->max_vfs
= new_max_vfs
;
703 for (i
= 0; i
< new_max_vfs
; i
++) {
704 bp
->pf
->vf_info
[i
].fid
=
705 bp
->pf
->first_vf_id
+ i
;
706 bp
->pf
->vf_info
[i
].vlan_table
=
707 rte_zmalloc("VF VLAN table",
710 if (bp
->pf
->vf_info
[i
].vlan_table
== NULL
)
712 "Fail to alloc VLAN table for VF %d\n",
716 bp
->pf
->vf_info
[i
].vlan_table
);
717 bp
->pf
->vf_info
[i
].vlan_as_table
=
718 rte_zmalloc("VF VLAN AS table",
721 if (bp
->pf
->vf_info
[i
].vlan_as_table
== NULL
)
723 "Alloc VLAN AS table for VF %d fail\n",
727 bp
->pf
->vf_info
[i
].vlan_as_table
);
728 STAILQ_INIT(&bp
->pf
->vf_info
[i
].filter
);
733 bp
->fw_fid
= rte_le_to_cpu_32(resp
->fid
);
734 if (!bnxt_check_zero_bytes(resp
->mac_address
, RTE_ETHER_ADDR_LEN
)) {
735 bp
->flags
|= BNXT_FLAG_DFLT_MAC_SET
;
736 memcpy(bp
->mac_addr
, &resp
->mac_address
, RTE_ETHER_ADDR_LEN
);
738 bp
->flags
&= ~BNXT_FLAG_DFLT_MAC_SET
;
740 bp
->max_rsscos_ctx
= rte_le_to_cpu_16(resp
->max_rsscos_ctx
);
741 bp
->max_cp_rings
= rte_le_to_cpu_16(resp
->max_cmpl_rings
);
742 bp
->max_tx_rings
= rte_le_to_cpu_16(resp
->max_tx_rings
);
743 bp
->max_rx_rings
= rte_le_to_cpu_16(resp
->max_rx_rings
);
744 bp
->first_vf_id
= rte_le_to_cpu_16(resp
->first_vf_id
);
745 bp
->max_rx_em_flows
= rte_le_to_cpu_16(resp
->max_rx_em_flows
);
746 bp
->max_l2_ctx
= rte_le_to_cpu_16(resp
->max_l2_ctxs
);
747 if (!BNXT_CHIP_THOR(bp
))
748 bp
->max_l2_ctx
+= bp
->max_rx_em_flows
;
749 /* TODO: For now, do not support VMDq/RFS on VFs. */
754 bp
->max_vnics
= rte_le_to_cpu_16(resp
->max_vnics
);
758 PMD_DRV_LOG(DEBUG
, "Max l2_cntxts is %d vnics is %d\n",
759 bp
->max_l2_ctx
, bp
->max_vnics
);
760 bp
->max_stat_ctx
= rte_le_to_cpu_16(resp
->max_stat_ctx
);
762 bp
->pf
->total_vnics
= rte_le_to_cpu_16(resp
->max_vnics
);
763 if (flags
& HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED
) {
764 bp
->flags
|= BNXT_FLAG_PTP_SUPPORTED
;
765 PMD_DRV_LOG(DEBUG
, "PTP SUPPORTED\n");
767 bnxt_hwrm_ptp_qcfg(bp
);
771 if (flags
& HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED
)
772 bp
->flags
|= BNXT_FLAG_EXT_STATS_SUPPORTED
;
774 if (flags
& HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE
) {
775 bp
->fw_cap
|= BNXT_FW_CAP_ERROR_RECOVERY
;
776 PMD_DRV_LOG(DEBUG
, "Adapter Error recovery SUPPORTED\n");
779 if (flags
& HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD
)
780 bp
->fw_cap
|= BNXT_FW_CAP_ERR_RECOVER_RELOAD
;
782 if (flags
& HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE
)
783 bp
->fw_cap
|= BNXT_FW_CAP_HOT_RESET
;
790 int bnxt_hwrm_func_qcaps(struct bnxt
*bp
)
794 rc
= __bnxt_hwrm_func_qcaps(bp
);
795 if (!rc
&& bp
->hwrm_spec_code
>= HWRM_SPEC_CODE_1_8_3
) {
796 rc
= bnxt_alloc_ctx_mem(bp
);
800 rc
= bnxt_hwrm_func_resc_qcaps(bp
);
802 bp
->flags
|= BNXT_FLAG_NEW_RM
;
806 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
807 * But the error can be ignored. Return success.
813 /* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
814 int bnxt_hwrm_vnic_qcaps(struct bnxt
*bp
)
817 struct hwrm_vnic_qcaps_input req
= {.req_type
= 0 };
818 struct hwrm_vnic_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
820 HWRM_PREP(&req
, HWRM_VNIC_QCAPS
, BNXT_USE_CHIMP_MB
);
822 req
.target_id
= rte_cpu_to_le_16(0xffff);
824 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
828 if (rte_le_to_cpu_32(resp
->flags
) &
829 HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP
) {
830 bp
->vnic_cap_flags
|= BNXT_VNIC_CAP_COS_CLASSIFY
;
831 PMD_DRV_LOG(INFO
, "CoS assignment capability enabled\n");
834 bp
->max_tpa_v2
= rte_le_to_cpu_16(resp
->max_aggs_supported
);
841 int bnxt_hwrm_func_reset(struct bnxt
*bp
)
844 struct hwrm_func_reset_input req
= {.req_type
= 0 };
845 struct hwrm_func_reset_output
*resp
= bp
->hwrm_cmd_resp_addr
;
847 HWRM_PREP(&req
, HWRM_FUNC_RESET
, BNXT_USE_CHIMP_MB
);
849 req
.enables
= rte_cpu_to_le_32(0);
851 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
859 int bnxt_hwrm_func_driver_register(struct bnxt
*bp
)
863 struct hwrm_func_drv_rgtr_input req
= {.req_type
= 0 };
864 struct hwrm_func_drv_rgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
866 if (bp
->flags
& BNXT_FLAG_REGISTERED
)
869 if (bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
)
870 flags
= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT
;
871 if (bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
)
872 flags
|= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT
;
874 /* PFs and trusted VFs should indicate the support of the
875 * Master capability on non Stingray platform
877 if ((BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
)) && !BNXT_STINGRAY(bp
))
878 flags
|= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT
;
880 HWRM_PREP(&req
, HWRM_FUNC_DRV_RGTR
, BNXT_USE_CHIMP_MB
);
881 req
.enables
= rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER
|
882 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD
);
883 req
.ver_maj
= RTE_VER_YEAR
;
884 req
.ver_min
= RTE_VER_MONTH
;
885 req
.ver_upd
= RTE_VER_MINOR
;
888 req
.enables
|= rte_cpu_to_le_32(
889 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD
);
890 memcpy(req
.vf_req_fwd
, bp
->pf
->vf_req_fwd
,
891 RTE_MIN(sizeof(req
.vf_req_fwd
),
892 sizeof(bp
->pf
->vf_req_fwd
)));
895 * PF can sniff HWRM API issued by VF. This can be set up by
896 * linux driver and inherited by the DPDK PF driver. Clear
897 * this HWRM sniffer list in FW because DPDK PF driver does
900 flags
|= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE
;
903 req
.flags
= rte_cpu_to_le_32(flags
);
905 req
.async_event_fwd
[0] |=
906 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE
|
907 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED
|
908 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
|
909 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE
|
910 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY
);
911 if (bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
)
912 req
.async_event_fwd
[0] |=
913 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY
);
914 req
.async_event_fwd
[1] |=
915 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD
|
916 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE
);
918 req
.async_event_fwd
[1] |=
919 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION
);
921 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
925 flags
= rte_le_to_cpu_32(resp
->flags
);
926 if (flags
& HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED
)
927 bp
->fw_cap
|= BNXT_FW_CAP_IF_CHANGE
;
931 bp
->flags
|= BNXT_FLAG_REGISTERED
;
936 int bnxt_hwrm_check_vf_rings(struct bnxt
*bp
)
938 if (!(BNXT_VF(bp
) && (bp
->flags
& BNXT_FLAG_NEW_RM
)))
941 return bnxt_hwrm_func_reserve_vf_resc(bp
, true);
944 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt
*bp
, bool test
)
949 struct hwrm_func_vf_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
950 struct hwrm_func_vf_cfg_input req
= {0};
952 HWRM_PREP(&req
, HWRM_FUNC_VF_CFG
, BNXT_USE_CHIMP_MB
);
954 enables
= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS
|
955 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS
|
956 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS
|
957 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS
|
958 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS
;
960 if (BNXT_HAS_RING_GRPS(bp
)) {
961 enables
|= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS
;
962 req
.num_hw_ring_grps
= rte_cpu_to_le_16(bp
->rx_nr_rings
);
965 req
.num_tx_rings
= rte_cpu_to_le_16(bp
->tx_nr_rings
);
966 req
.num_rx_rings
= rte_cpu_to_le_16(bp
->rx_nr_rings
*
967 AGG_RING_MULTIPLIER
);
968 req
.num_stat_ctxs
= rte_cpu_to_le_16(bp
->rx_nr_rings
+ bp
->tx_nr_rings
);
969 req
.num_cmpl_rings
= rte_cpu_to_le_16(bp
->rx_nr_rings
+
971 BNXT_NUM_ASYNC_CPR(bp
));
972 req
.num_vnics
= rte_cpu_to_le_16(bp
->rx_nr_rings
);
973 if (bp
->vf_resv_strategy
==
974 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC
) {
975 enables
|= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS
|
976 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS
|
977 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS
;
978 req
.num_rsscos_ctxs
= rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX
);
979 req
.num_l2_ctxs
= rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX
);
980 req
.num_vnics
= rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC
);
981 } else if (bp
->vf_resv_strategy
==
982 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL
) {
983 enables
|= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS
;
984 req
.num_rsscos_ctxs
= rte_cpu_to_le_16(bp
->max_rsscos_ctx
);
988 flags
= HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST
|
989 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST
|
990 HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST
|
991 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST
|
992 HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST
|
993 HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST
;
995 if (test
&& BNXT_HAS_RING_GRPS(bp
))
996 flags
|= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST
;
998 req
.flags
= rte_cpu_to_le_32(flags
);
999 req
.enables
|= rte_cpu_to_le_32(enables
);
1001 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1004 HWRM_CHECK_RESULT_SILENT();
1006 HWRM_CHECK_RESULT();
1012 int bnxt_hwrm_func_resc_qcaps(struct bnxt
*bp
)
1015 struct hwrm_func_resource_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1016 struct hwrm_func_resource_qcaps_input req
= {0};
1018 HWRM_PREP(&req
, HWRM_FUNC_RESOURCE_QCAPS
, BNXT_USE_CHIMP_MB
);
1019 req
.fid
= rte_cpu_to_le_16(0xffff);
1021 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1023 HWRM_CHECK_RESULT_SILENT();
1026 bp
->max_rsscos_ctx
= rte_le_to_cpu_16(resp
->max_rsscos_ctx
);
1027 bp
->max_cp_rings
= rte_le_to_cpu_16(resp
->max_cmpl_rings
);
1028 bp
->max_tx_rings
= rte_le_to_cpu_16(resp
->max_tx_rings
);
1029 bp
->max_rx_rings
= rte_le_to_cpu_16(resp
->max_rx_rings
);
1030 bp
->max_ring_grps
= rte_le_to_cpu_32(resp
->max_hw_ring_grps
);
1031 /* func_resource_qcaps does not return max_rx_em_flows.
1032 * So use the value provided by func_qcaps.
1034 bp
->max_l2_ctx
= rte_le_to_cpu_16(resp
->max_l2_ctxs
);
1035 if (!BNXT_CHIP_THOR(bp
))
1036 bp
->max_l2_ctx
+= bp
->max_rx_em_flows
;
1037 bp
->max_vnics
= rte_le_to_cpu_16(resp
->max_vnics
);
1038 bp
->max_stat_ctx
= rte_le_to_cpu_16(resp
->max_stat_ctx
);
1040 bp
->max_nq_rings
= rte_le_to_cpu_16(resp
->max_msix
);
1041 bp
->vf_resv_strategy
= rte_le_to_cpu_16(resp
->vf_reservation_strategy
);
1042 if (bp
->vf_resv_strategy
>
1043 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC
)
1044 bp
->vf_resv_strategy
=
1045 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL
;
1051 int bnxt_hwrm_ver_get(struct bnxt
*bp
, uint32_t timeout
)
1054 struct hwrm_ver_get_input req
= {.req_type
= 0 };
1055 struct hwrm_ver_get_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1056 uint32_t fw_version
;
1057 uint16_t max_resp_len
;
1058 char type
[RTE_MEMZONE_NAMESIZE
];
1059 uint32_t dev_caps_cfg
;
1061 bp
->max_req_len
= HWRM_MAX_REQ_LEN
;
1062 bp
->hwrm_cmd_timeout
= timeout
;
1063 HWRM_PREP(&req
, HWRM_VER_GET
, BNXT_USE_CHIMP_MB
);
1065 req
.hwrm_intf_maj
= HWRM_VERSION_MAJOR
;
1066 req
.hwrm_intf_min
= HWRM_VERSION_MINOR
;
1067 req
.hwrm_intf_upd
= HWRM_VERSION_UPDATE
;
1069 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1071 if (bp
->flags
& BNXT_FLAG_FW_RESET
)
1072 HWRM_CHECK_RESULT_SILENT();
1074 HWRM_CHECK_RESULT();
1076 PMD_DRV_LOG(INFO
, "%d.%d.%d:%d.%d.%d\n",
1077 resp
->hwrm_intf_maj_8b
, resp
->hwrm_intf_min_8b
,
1078 resp
->hwrm_intf_upd_8b
, resp
->hwrm_fw_maj_8b
,
1079 resp
->hwrm_fw_min_8b
, resp
->hwrm_fw_bld_8b
);
1080 bp
->fw_ver
= (resp
->hwrm_fw_maj_8b
<< 24) |
1081 (resp
->hwrm_fw_min_8b
<< 16) |
1082 (resp
->hwrm_fw_bld_8b
<< 8) |
1083 resp
->hwrm_fw_rsvd_8b
;
1084 PMD_DRV_LOG(INFO
, "Driver HWRM version: %d.%d.%d\n",
1085 HWRM_VERSION_MAJOR
, HWRM_VERSION_MINOR
, HWRM_VERSION_UPDATE
);
1087 fw_version
= resp
->hwrm_intf_maj_8b
<< 16;
1088 fw_version
|= resp
->hwrm_intf_min_8b
<< 8;
1089 fw_version
|= resp
->hwrm_intf_upd_8b
;
1090 bp
->hwrm_spec_code
= fw_version
;
1092 /* def_req_timeout value is in milliseconds */
1093 bp
->hwrm_cmd_timeout
= rte_le_to_cpu_16(resp
->def_req_timeout
);
1094 /* convert timeout to usec */
1095 bp
->hwrm_cmd_timeout
*= 1000;
1096 if (!bp
->hwrm_cmd_timeout
)
1097 bp
->hwrm_cmd_timeout
= DFLT_HWRM_CMD_TIMEOUT
;
1099 if (resp
->hwrm_intf_maj_8b
!= HWRM_VERSION_MAJOR
) {
1100 PMD_DRV_LOG(ERR
, "Unsupported firmware API version\n");
1105 if (bp
->max_req_len
> resp
->max_req_win_len
) {
1106 PMD_DRV_LOG(ERR
, "Unsupported request length\n");
1109 bp
->max_req_len
= rte_le_to_cpu_16(resp
->max_req_win_len
);
1110 bp
->hwrm_max_ext_req_len
= rte_le_to_cpu_16(resp
->max_ext_req_len
);
1111 if (bp
->hwrm_max_ext_req_len
< HWRM_MAX_REQ_LEN
)
1112 bp
->hwrm_max_ext_req_len
= HWRM_MAX_REQ_LEN
;
1114 max_resp_len
= rte_le_to_cpu_16(resp
->max_resp_len
);
1115 dev_caps_cfg
= rte_le_to_cpu_32(resp
->dev_caps_cfg
);
1117 if (bp
->max_resp_len
!= max_resp_len
) {
1118 sprintf(type
, "bnxt_hwrm_" PCI_PRI_FMT
,
1119 bp
->pdev
->addr
.domain
, bp
->pdev
->addr
.bus
,
1120 bp
->pdev
->addr
.devid
, bp
->pdev
->addr
.function
);
1122 rte_free(bp
->hwrm_cmd_resp_addr
);
1124 bp
->hwrm_cmd_resp_addr
= rte_malloc(type
, max_resp_len
, 0);
1125 if (bp
->hwrm_cmd_resp_addr
== NULL
) {
1129 bp
->hwrm_cmd_resp_dma_addr
=
1130 rte_malloc_virt2iova(bp
->hwrm_cmd_resp_addr
);
1131 if (bp
->hwrm_cmd_resp_dma_addr
== RTE_BAD_IOVA
) {
1133 "Unable to map response buffer to physical memory.\n");
1137 bp
->max_resp_len
= max_resp_len
;
1141 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED
) &&
1143 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED
)) {
1144 PMD_DRV_LOG(DEBUG
, "Short command supported\n");
1145 bp
->flags
|= BNXT_FLAG_SHORT_CMD
;
1148 if (((dev_caps_cfg
&
1149 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED
) &&
1151 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED
)) ||
1152 bp
->hwrm_max_ext_req_len
> HWRM_MAX_REQ_LEN
) {
1153 sprintf(type
, "bnxt_hwrm_short_" PCI_PRI_FMT
,
1154 bp
->pdev
->addr
.domain
, bp
->pdev
->addr
.bus
,
1155 bp
->pdev
->addr
.devid
, bp
->pdev
->addr
.function
);
1157 rte_free(bp
->hwrm_short_cmd_req_addr
);
1159 bp
->hwrm_short_cmd_req_addr
=
1160 rte_malloc(type
, bp
->hwrm_max_ext_req_len
, 0);
1161 if (bp
->hwrm_short_cmd_req_addr
== NULL
) {
1165 bp
->hwrm_short_cmd_req_dma_addr
=
1166 rte_malloc_virt2iova(bp
->hwrm_short_cmd_req_addr
);
1167 if (bp
->hwrm_short_cmd_req_dma_addr
== RTE_BAD_IOVA
) {
1168 rte_free(bp
->hwrm_short_cmd_req_addr
);
1170 "Unable to map buffer to physical memory.\n");
1176 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED
) {
1177 bp
->flags
|= BNXT_FLAG_KONG_MB_EN
;
1178 PMD_DRV_LOG(DEBUG
, "Kong mailbox channel enabled\n");
1181 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED
)
1182 PMD_DRV_LOG(DEBUG
, "FW supports Trusted VFs\n");
1184 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED
) {
1185 bp
->fw_cap
|= BNXT_FW_CAP_ADV_FLOW_MGMT
;
1186 PMD_DRV_LOG(DEBUG
, "FW supports advanced flow management\n");
1190 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED
) {
1191 PMD_DRV_LOG(DEBUG
, "FW supports advanced flow counters\n");
1192 bp
->fw_cap
|= BNXT_FW_CAP_ADV_FLOW_COUNTERS
;
1201 int bnxt_hwrm_func_driver_unregister(struct bnxt
*bp
, uint32_t flags
)
1204 struct hwrm_func_drv_unrgtr_input req
= {.req_type
= 0 };
1205 struct hwrm_func_drv_unrgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1207 if (!(bp
->flags
& BNXT_FLAG_REGISTERED
))
1210 HWRM_PREP(&req
, HWRM_FUNC_DRV_UNRGTR
, BNXT_USE_CHIMP_MB
);
1213 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1215 HWRM_CHECK_RESULT();
1221 static int bnxt_hwrm_port_phy_cfg(struct bnxt
*bp
, struct bnxt_link_info
*conf
)
1224 struct hwrm_port_phy_cfg_input req
= {0};
1225 struct hwrm_port_phy_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1226 uint32_t enables
= 0;
1228 HWRM_PREP(&req
, HWRM_PORT_PHY_CFG
, BNXT_USE_CHIMP_MB
);
1230 if (conf
->link_up
) {
1231 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1232 if (bp
->link_info
->auto_mode
&& conf
->link_speed
) {
1233 req
.auto_mode
= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE
;
1234 PMD_DRV_LOG(DEBUG
, "Disabling AutoNeg\n");
1237 req
.flags
= rte_cpu_to_le_32(conf
->phy_flags
);
1238 req
.force_link_speed
= rte_cpu_to_le_16(conf
->link_speed
);
1239 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE
;
1241 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1242 * any auto mode, even "none".
1244 if (!conf
->link_speed
) {
1245 /* No speeds specified. Enable AutoNeg - all speeds */
1247 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS
;
1249 /* AutoNeg - Advertise speeds specified. */
1250 if (conf
->auto_link_speed_mask
&&
1251 !(conf
->phy_flags
& HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE
)) {
1253 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
;
1254 req
.auto_link_speed_mask
=
1255 conf
->auto_link_speed_mask
;
1257 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK
;
1260 req
.auto_duplex
= conf
->duplex
;
1261 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX
;
1262 req
.auto_pause
= conf
->auto_pause
;
1263 req
.force_pause
= conf
->force_pause
;
1264 /* Set force_pause if there is no auto or if there is a force */
1265 if (req
.auto_pause
&& !req
.force_pause
)
1266 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE
;
1268 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE
;
1270 req
.enables
= rte_cpu_to_le_32(enables
);
1273 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN
);
1274 PMD_DRV_LOG(INFO
, "Force Link Down\n");
1277 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1279 HWRM_CHECK_RESULT();
1285 static int bnxt_hwrm_port_phy_qcfg(struct bnxt
*bp
,
1286 struct bnxt_link_info
*link_info
)
1289 struct hwrm_port_phy_qcfg_input req
= {0};
1290 struct hwrm_port_phy_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1292 HWRM_PREP(&req
, HWRM_PORT_PHY_QCFG
, BNXT_USE_CHIMP_MB
);
1294 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1296 HWRM_CHECK_RESULT();
1298 link_info
->phy_link_status
= resp
->link
;
1299 link_info
->link_up
=
1300 (link_info
->phy_link_status
==
1301 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK
) ? 1 : 0;
1302 link_info
->link_speed
= rte_le_to_cpu_16(resp
->link_speed
);
1303 link_info
->duplex
= resp
->duplex_cfg
;
1304 link_info
->pause
= resp
->pause
;
1305 link_info
->auto_pause
= resp
->auto_pause
;
1306 link_info
->force_pause
= resp
->force_pause
;
1307 link_info
->auto_mode
= resp
->auto_mode
;
1308 link_info
->phy_type
= resp
->phy_type
;
1309 link_info
->media_type
= resp
->media_type
;
1311 link_info
->support_speeds
= rte_le_to_cpu_16(resp
->support_speeds
);
1312 link_info
->auto_link_speed
= rte_le_to_cpu_16(resp
->auto_link_speed
);
1313 link_info
->preemphasis
= rte_le_to_cpu_32(resp
->preemphasis
);
1314 link_info
->force_link_speed
= rte_le_to_cpu_16(resp
->force_link_speed
);
1315 link_info
->phy_ver
[0] = resp
->phy_maj
;
1316 link_info
->phy_ver
[1] = resp
->phy_min
;
1317 link_info
->phy_ver
[2] = resp
->phy_bld
;
1321 PMD_DRV_LOG(DEBUG
, "Link Speed %d\n", link_info
->link_speed
);
1322 PMD_DRV_LOG(DEBUG
, "Auto Mode %d\n", link_info
->auto_mode
);
1323 PMD_DRV_LOG(DEBUG
, "Support Speeds %x\n", link_info
->support_speeds
);
1324 PMD_DRV_LOG(DEBUG
, "Auto Link Speed %x\n", link_info
->auto_link_speed
);
1325 PMD_DRV_LOG(DEBUG
, "Auto Link Speed Mask %x\n",
1326 link_info
->auto_link_speed_mask
);
1327 PMD_DRV_LOG(DEBUG
, "Forced Link Speed %x\n",
1328 link_info
->force_link_speed
);
1333 static bool bnxt_find_lossy_profile(struct bnxt
*bp
)
1337 for (i
= BNXT_COS_QUEUE_COUNT
- 1; i
>= 0; i
--) {
1338 if (bp
->tx_cos_queue
[i
].profile
==
1339 HWRM_QUEUE_SERVICE_PROFILE_LOSSY
) {
1340 bp
->tx_cosq_id
[0] = bp
->tx_cos_queue
[i
].id
;
1347 static void bnxt_find_first_valid_profile(struct bnxt
*bp
)
1351 for (i
= BNXT_COS_QUEUE_COUNT
- 1; i
>= 0; i
--) {
1352 if (bp
->tx_cos_queue
[i
].profile
!=
1353 HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN
&&
1354 bp
->tx_cos_queue
[i
].id
!=
1355 HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN
) {
1356 bp
->tx_cosq_id
[0] = bp
->tx_cos_queue
[i
].id
;
1362 int bnxt_hwrm_queue_qportcfg(struct bnxt
*bp
)
1365 struct hwrm_queue_qportcfg_input req
= {.req_type
= 0 };
1366 struct hwrm_queue_qportcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1367 uint32_t dir
= HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX
;
1371 HWRM_PREP(&req
, HWRM_QUEUE_QPORTCFG
, BNXT_USE_CHIMP_MB
);
1373 req
.flags
= rte_cpu_to_le_32(dir
);
1374 /* HWRM Version >= 1.9.1 only if COS Classification is not required. */
1375 if (bp
->hwrm_spec_code
>= HWRM_VERSION_1_9_1
&&
1376 !(bp
->vnic_cap_flags
& BNXT_VNIC_CAP_COS_CLASSIFY
))
1378 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
;
1379 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1381 HWRM_CHECK_RESULT();
1383 if (dir
== HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX
) {
1384 GET_TX_QUEUE_INFO(0);
1385 GET_TX_QUEUE_INFO(1);
1386 GET_TX_QUEUE_INFO(2);
1387 GET_TX_QUEUE_INFO(3);
1388 GET_TX_QUEUE_INFO(4);
1389 GET_TX_QUEUE_INFO(5);
1390 GET_TX_QUEUE_INFO(6);
1391 GET_TX_QUEUE_INFO(7);
1393 GET_RX_QUEUE_INFO(0);
1394 GET_RX_QUEUE_INFO(1);
1395 GET_RX_QUEUE_INFO(2);
1396 GET_RX_QUEUE_INFO(3);
1397 GET_RX_QUEUE_INFO(4);
1398 GET_RX_QUEUE_INFO(5);
1399 GET_RX_QUEUE_INFO(6);
1400 GET_RX_QUEUE_INFO(7);
1405 if (dir
== HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
)
1408 if (bp
->hwrm_spec_code
< HWRM_VERSION_1_9_1
) {
1409 bp
->tx_cosq_id
[0] = bp
->tx_cos_queue
[0].id
;
1413 /* iterate and find the COSq profile to use for Tx */
1414 if (bp
->vnic_cap_flags
& BNXT_VNIC_CAP_COS_CLASSIFY
) {
1415 for (j
= 0, i
= 0; i
< BNXT_COS_QUEUE_COUNT
; i
++) {
1416 if (bp
->tx_cos_queue
[i
].id
!= 0xff)
1417 bp
->tx_cosq_id
[j
++] =
1418 bp
->tx_cos_queue
[i
].id
;
1421 /* When CoS classification is disabled, for normal NIC
1422 * operations, ideally we should look to use LOSSY.
1423 * If not found, fallback to the first valid profile
1425 if (!bnxt_find_lossy_profile(bp
))
1426 bnxt_find_first_valid_profile(bp
);
1431 bp
->max_tc
= resp
->max_configurable_queues
;
1432 bp
->max_lltc
= resp
->max_configurable_lossless_queues
;
1433 if (bp
->max_tc
> BNXT_MAX_QUEUE
)
1434 bp
->max_tc
= BNXT_MAX_QUEUE
;
1435 bp
->max_q
= bp
->max_tc
;
1437 if (dir
== HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX
) {
1438 dir
= HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
;
1446 int bnxt_hwrm_ring_alloc(struct bnxt
*bp
,
1447 struct bnxt_ring
*ring
,
1448 uint32_t ring_type
, uint32_t map_index
,
1449 uint32_t stats_ctx_id
, uint32_t cmpl_ring_id
,
1450 uint16_t tx_cosq_id
)
1453 uint32_t enables
= 0;
1454 struct hwrm_ring_alloc_input req
= {.req_type
= 0 };
1455 struct hwrm_ring_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1456 struct rte_mempool
*mb_pool
;
1457 uint16_t rx_buf_size
;
1459 HWRM_PREP(&req
, HWRM_RING_ALLOC
, BNXT_USE_CHIMP_MB
);
1461 req
.page_tbl_addr
= rte_cpu_to_le_64(ring
->bd_dma
);
1462 req
.fbo
= rte_cpu_to_le_32(0);
1463 /* Association of ring index with doorbell index */
1464 req
.logical_id
= rte_cpu_to_le_16(map_index
);
1465 req
.length
= rte_cpu_to_le_32(ring
->ring_size
);
1467 switch (ring_type
) {
1468 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX
:
1469 req
.ring_type
= ring_type
;
1470 req
.cmpl_ring_id
= rte_cpu_to_le_16(cmpl_ring_id
);
1471 req
.stat_ctx_id
= rte_cpu_to_le_32(stats_ctx_id
);
1472 req
.queue_id
= rte_cpu_to_le_16(tx_cosq_id
);
1473 if (stats_ctx_id
!= INVALID_STATS_CTX_ID
)
1475 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
;
1477 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX
:
1478 req
.ring_type
= ring_type
;
1479 req
.cmpl_ring_id
= rte_cpu_to_le_16(cmpl_ring_id
);
1480 req
.stat_ctx_id
= rte_cpu_to_le_32(stats_ctx_id
);
1481 if (BNXT_CHIP_THOR(bp
)) {
1482 mb_pool
= bp
->rx_queues
[0]->mb_pool
;
1483 rx_buf_size
= rte_pktmbuf_data_room_size(mb_pool
) -
1484 RTE_PKTMBUF_HEADROOM
;
1485 rx_buf_size
= RTE_MIN(BNXT_MAX_PKT_LEN
, rx_buf_size
);
1486 req
.rx_buf_size
= rte_cpu_to_le_16(rx_buf_size
);
1488 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID
;
1490 if (stats_ctx_id
!= INVALID_STATS_CTX_ID
)
1492 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
;
1494 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL
:
1495 req
.ring_type
= ring_type
;
1496 if (BNXT_HAS_NQ(bp
)) {
1497 /* Association of cp ring with nq */
1498 req
.nq_ring_id
= rte_cpu_to_le_16(cmpl_ring_id
);
1500 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID
;
1502 req
.int_mode
= HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX
;
1504 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
:
1505 req
.ring_type
= ring_type
;
1506 req
.page_size
= BNXT_PAGE_SHFT
;
1507 req
.int_mode
= HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX
;
1509 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG
:
1510 req
.ring_type
= ring_type
;
1511 req
.rx_ring_id
= rte_cpu_to_le_16(ring
->fw_rx_ring_id
);
1513 mb_pool
= bp
->rx_queues
[0]->mb_pool
;
1514 rx_buf_size
= rte_pktmbuf_data_room_size(mb_pool
) -
1515 RTE_PKTMBUF_HEADROOM
;
1516 rx_buf_size
= RTE_MIN(BNXT_MAX_PKT_LEN
, rx_buf_size
);
1517 req
.rx_buf_size
= rte_cpu_to_le_16(rx_buf_size
);
1519 req
.stat_ctx_id
= rte_cpu_to_le_32(stats_ctx_id
);
1520 enables
|= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID
|
1521 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID
|
1522 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
;
1525 PMD_DRV_LOG(ERR
, "hwrm alloc invalid ring type %d\n",
1530 req
.enables
= rte_cpu_to_le_32(enables
);
1532 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1534 if (rc
|| resp
->error_code
) {
1535 if (rc
== 0 && resp
->error_code
)
1536 rc
= rte_le_to_cpu_16(resp
->error_code
);
1537 switch (ring_type
) {
1538 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL
:
1540 "hwrm_ring_alloc cp failed. rc:%d\n", rc
);
1543 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX
:
1545 "hwrm_ring_alloc rx failed. rc:%d\n", rc
);
1548 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG
:
1550 "hwrm_ring_alloc rx agg failed. rc:%d\n",
1554 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX
:
1556 "hwrm_ring_alloc tx failed. rc:%d\n", rc
);
1559 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
:
1561 "hwrm_ring_alloc nq failed. rc:%d\n", rc
);
1565 PMD_DRV_LOG(ERR
, "Invalid ring. rc:%d\n", rc
);
1571 ring
->fw_ring_id
= rte_le_to_cpu_16(resp
->ring_id
);
1576 int bnxt_hwrm_ring_free(struct bnxt
*bp
,
1577 struct bnxt_ring
*ring
, uint32_t ring_type
)
1580 struct hwrm_ring_free_input req
= {.req_type
= 0 };
1581 struct hwrm_ring_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1583 HWRM_PREP(&req
, HWRM_RING_FREE
, BNXT_USE_CHIMP_MB
);
1585 req
.ring_type
= ring_type
;
1586 req
.ring_id
= rte_cpu_to_le_16(ring
->fw_ring_id
);
1588 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1590 if (rc
|| resp
->error_code
) {
1591 if (rc
== 0 && resp
->error_code
)
1592 rc
= rte_le_to_cpu_16(resp
->error_code
);
1595 switch (ring_type
) {
1596 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL
:
1597 PMD_DRV_LOG(ERR
, "hwrm_ring_free cp failed. rc:%d\n",
1600 case HWRM_RING_FREE_INPUT_RING_TYPE_RX
:
1601 PMD_DRV_LOG(ERR
, "hwrm_ring_free rx failed. rc:%d\n",
1604 case HWRM_RING_FREE_INPUT_RING_TYPE_TX
:
1605 PMD_DRV_LOG(ERR
, "hwrm_ring_free tx failed. rc:%d\n",
1608 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ
:
1610 "hwrm_ring_free nq failed. rc:%d\n", rc
);
1612 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG
:
1614 "hwrm_ring_free agg failed. rc:%d\n", rc
);
1617 PMD_DRV_LOG(ERR
, "Invalid ring, rc:%d\n", rc
);
1625 int bnxt_hwrm_ring_grp_alloc(struct bnxt
*bp
, unsigned int idx
)
1628 struct hwrm_ring_grp_alloc_input req
= {.req_type
= 0 };
1629 struct hwrm_ring_grp_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1631 HWRM_PREP(&req
, HWRM_RING_GRP_ALLOC
, BNXT_USE_CHIMP_MB
);
1633 req
.cr
= rte_cpu_to_le_16(bp
->grp_info
[idx
].cp_fw_ring_id
);
1634 req
.rr
= rte_cpu_to_le_16(bp
->grp_info
[idx
].rx_fw_ring_id
);
1635 req
.ar
= rte_cpu_to_le_16(bp
->grp_info
[idx
].ag_fw_ring_id
);
1636 req
.sc
= rte_cpu_to_le_16(bp
->grp_info
[idx
].fw_stats_ctx
);
1638 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1640 HWRM_CHECK_RESULT();
1642 bp
->grp_info
[idx
].fw_grp_id
= rte_le_to_cpu_16(resp
->ring_group_id
);
1649 int bnxt_hwrm_ring_grp_free(struct bnxt
*bp
, unsigned int idx
)
1652 struct hwrm_ring_grp_free_input req
= {.req_type
= 0 };
1653 struct hwrm_ring_grp_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1655 HWRM_PREP(&req
, HWRM_RING_GRP_FREE
, BNXT_USE_CHIMP_MB
);
1657 req
.ring_group_id
= rte_cpu_to_le_16(bp
->grp_info
[idx
].fw_grp_id
);
1659 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1661 HWRM_CHECK_RESULT();
1664 bp
->grp_info
[idx
].fw_grp_id
= INVALID_HW_RING_ID
;
1668 int bnxt_hwrm_stat_clear(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
)
1671 struct hwrm_stat_ctx_clr_stats_input req
= {.req_type
= 0 };
1672 struct hwrm_stat_ctx_clr_stats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1674 if (cpr
->hw_stats_ctx_id
== (uint32_t)HWRM_NA_SIGNATURE
)
1677 HWRM_PREP(&req
, HWRM_STAT_CTX_CLR_STATS
, BNXT_USE_CHIMP_MB
);
1679 req
.stat_ctx_id
= rte_cpu_to_le_32(cpr
->hw_stats_ctx_id
);
1681 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1683 HWRM_CHECK_RESULT();
1689 int bnxt_hwrm_stat_ctx_alloc(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
,
1690 unsigned int idx __rte_unused
)
1693 struct hwrm_stat_ctx_alloc_input req
= {.req_type
= 0 };
1694 struct hwrm_stat_ctx_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1696 HWRM_PREP(&req
, HWRM_STAT_CTX_ALLOC
, BNXT_USE_CHIMP_MB
);
1698 req
.update_period_ms
= rte_cpu_to_le_32(0);
1700 req
.stats_dma_addr
= rte_cpu_to_le_64(cpr
->hw_stats_map
);
1702 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1704 HWRM_CHECK_RESULT();
1706 cpr
->hw_stats_ctx_id
= rte_le_to_cpu_32(resp
->stat_ctx_id
);
1713 int bnxt_hwrm_stat_ctx_free(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
,
1714 unsigned int idx __rte_unused
)
1717 struct hwrm_stat_ctx_free_input req
= {.req_type
= 0 };
1718 struct hwrm_stat_ctx_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1720 HWRM_PREP(&req
, HWRM_STAT_CTX_FREE
, BNXT_USE_CHIMP_MB
);
1722 req
.stat_ctx_id
= rte_cpu_to_le_32(cpr
->hw_stats_ctx_id
);
1724 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1726 HWRM_CHECK_RESULT();
1732 int bnxt_hwrm_vnic_alloc(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
1735 struct hwrm_vnic_alloc_input req
= { 0 };
1736 struct hwrm_vnic_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1738 if (!BNXT_HAS_RING_GRPS(bp
))
1739 goto skip_ring_grps
;
1741 /* map ring groups to this vnic */
1742 PMD_DRV_LOG(DEBUG
, "Alloc VNIC. Start %x, End %x\n",
1743 vnic
->start_grp_id
, vnic
->end_grp_id
);
1744 for (i
= vnic
->start_grp_id
, j
= 0; i
< vnic
->end_grp_id
; i
++, j
++)
1745 vnic
->fw_grp_ids
[j
] = bp
->grp_info
[i
].fw_grp_id
;
1747 vnic
->dflt_ring_grp
= bp
->grp_info
[vnic
->start_grp_id
].fw_grp_id
;
1748 vnic
->rss_rule
= (uint16_t)HWRM_NA_SIGNATURE
;
1749 vnic
->cos_rule
= (uint16_t)HWRM_NA_SIGNATURE
;
1750 vnic
->lb_rule
= (uint16_t)HWRM_NA_SIGNATURE
;
1753 vnic
->mru
= BNXT_VNIC_MRU(bp
->eth_dev
->data
->mtu
);
1754 HWRM_PREP(&req
, HWRM_VNIC_ALLOC
, BNXT_USE_CHIMP_MB
);
1756 if (vnic
->func_default
)
1758 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT
);
1759 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1761 HWRM_CHECK_RESULT();
1763 vnic
->fw_vnic_id
= rte_le_to_cpu_16(resp
->vnic_id
);
1765 PMD_DRV_LOG(DEBUG
, "VNIC ID %x\n", vnic
->fw_vnic_id
);
1769 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt
*bp
,
1770 struct bnxt_vnic_info
*vnic
,
1771 struct bnxt_plcmodes_cfg
*pmode
)
1774 struct hwrm_vnic_plcmodes_qcfg_input req
= {.req_type
= 0 };
1775 struct hwrm_vnic_plcmodes_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1777 HWRM_PREP(&req
, HWRM_VNIC_PLCMODES_QCFG
, BNXT_USE_CHIMP_MB
);
1779 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
1781 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1783 HWRM_CHECK_RESULT();
1785 pmode
->flags
= rte_le_to_cpu_32(resp
->flags
);
1786 /* dflt_vnic bit doesn't exist in the _cfg command */
1787 pmode
->flags
&= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC
);
1788 pmode
->jumbo_thresh
= rte_le_to_cpu_16(resp
->jumbo_thresh
);
1789 pmode
->hds_offset
= rte_le_to_cpu_16(resp
->hds_offset
);
1790 pmode
->hds_threshold
= rte_le_to_cpu_16(resp
->hds_threshold
);
1797 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt
*bp
,
1798 struct bnxt_vnic_info
*vnic
,
1799 struct bnxt_plcmodes_cfg
*pmode
)
1802 struct hwrm_vnic_plcmodes_cfg_input req
= {.req_type
= 0 };
1803 struct hwrm_vnic_plcmodes_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1805 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
1806 PMD_DRV_LOG(DEBUG
, "VNIC ID %x\n", vnic
->fw_vnic_id
);
1810 HWRM_PREP(&req
, HWRM_VNIC_PLCMODES_CFG
, BNXT_USE_CHIMP_MB
);
1812 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
1813 req
.flags
= rte_cpu_to_le_32(pmode
->flags
);
1814 req
.jumbo_thresh
= rte_cpu_to_le_16(pmode
->jumbo_thresh
);
1815 req
.hds_offset
= rte_cpu_to_le_16(pmode
->hds_offset
);
1816 req
.hds_threshold
= rte_cpu_to_le_16(pmode
->hds_threshold
);
1817 req
.enables
= rte_cpu_to_le_32(
1818 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID
|
1819 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID
|
1820 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1823 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1825 HWRM_CHECK_RESULT();
1831 int bnxt_hwrm_vnic_cfg(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
1834 struct hwrm_vnic_cfg_input req
= {.req_type
= 0 };
1835 struct hwrm_vnic_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1836 struct bnxt_plcmodes_cfg pmodes
= { 0 };
1837 uint32_t ctx_enable_flag
= 0;
1838 uint32_t enables
= 0;
1840 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
1841 PMD_DRV_LOG(DEBUG
, "VNIC ID %x\n", vnic
->fw_vnic_id
);
1845 rc
= bnxt_hwrm_vnic_plcmodes_qcfg(bp
, vnic
, &pmodes
);
1849 HWRM_PREP(&req
, HWRM_VNIC_CFG
, BNXT_USE_CHIMP_MB
);
1851 if (BNXT_CHIP_THOR(bp
)) {
1852 int dflt_rxq
= vnic
->start_grp_id
;
1853 struct bnxt_rx_ring_info
*rxr
;
1854 struct bnxt_cp_ring_info
*cpr
;
1855 struct bnxt_rx_queue
*rxq
;
1859 * The first active receive ring is used as the VNIC
1860 * default receive ring. If there are no active receive
1861 * rings (all corresponding receive queues are stopped),
1862 * the first receive ring is used.
1864 for (i
= vnic
->start_grp_id
; i
< vnic
->end_grp_id
; i
++) {
1865 rxq
= bp
->eth_dev
->data
->rx_queues
[i
];
1866 if (rxq
->rx_started
) {
1872 rxq
= bp
->eth_dev
->data
->rx_queues
[dflt_rxq
];
1876 req
.default_rx_ring_id
=
1877 rte_cpu_to_le_16(rxr
->rx_ring_struct
->fw_ring_id
);
1878 req
.default_cmpl_ring_id
=
1879 rte_cpu_to_le_16(cpr
->cp_ring_struct
->fw_ring_id
);
1880 enables
= HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID
|
1881 HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID
;
1885 /* Only RSS support for now TBD: COS & LB */
1886 enables
= HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP
;
1887 if (vnic
->lb_rule
!= 0xffff)
1888 ctx_enable_flag
|= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE
;
1889 if (vnic
->cos_rule
!= 0xffff)
1890 ctx_enable_flag
|= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE
;
1891 if (vnic
->rss_rule
!= (uint16_t)HWRM_NA_SIGNATURE
) {
1892 ctx_enable_flag
|= HWRM_VNIC_CFG_INPUT_ENABLES_MRU
;
1893 ctx_enable_flag
|= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE
;
1895 if (bp
->vnic_cap_flags
& BNXT_VNIC_CAP_COS_CLASSIFY
) {
1896 ctx_enable_flag
|= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID
;
1897 req
.queue_id
= rte_cpu_to_le_16(vnic
->cos_queue_id
);
1900 enables
|= ctx_enable_flag
;
1901 req
.dflt_ring_grp
= rte_cpu_to_le_16(vnic
->dflt_ring_grp
);
1902 req
.rss_rule
= rte_cpu_to_le_16(vnic
->rss_rule
);
1903 req
.cos_rule
= rte_cpu_to_le_16(vnic
->cos_rule
);
1904 req
.lb_rule
= rte_cpu_to_le_16(vnic
->lb_rule
);
1907 req
.enables
= rte_cpu_to_le_32(enables
);
1908 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
1909 req
.mru
= rte_cpu_to_le_16(vnic
->mru
);
1910 /* Configure default VNIC only once. */
1911 if (vnic
->func_default
&& !(bp
->flags
& BNXT_FLAG_DFLT_VNIC_SET
)) {
1913 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT
);
1914 bp
->flags
|= BNXT_FLAG_DFLT_VNIC_SET
;
1916 if (vnic
->vlan_strip
)
1918 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE
);
1921 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE
);
1922 if (vnic
->roce_dual
)
1923 req
.flags
|= rte_cpu_to_le_32(
1924 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE
);
1925 if (vnic
->roce_only
)
1926 req
.flags
|= rte_cpu_to_le_32(
1927 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE
);
1928 if (vnic
->rss_dflt_cr
)
1929 req
.flags
|= rte_cpu_to_le_32(
1930 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE
);
1932 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1934 HWRM_CHECK_RESULT();
1937 rc
= bnxt_hwrm_vnic_plcmodes_cfg(bp
, vnic
, &pmodes
);
1942 int bnxt_hwrm_vnic_qcfg(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
,
1946 struct hwrm_vnic_qcfg_input req
= {.req_type
= 0 };
1947 struct hwrm_vnic_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1949 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
1950 PMD_DRV_LOG(DEBUG
, "VNIC QCFG ID %d\n", vnic
->fw_vnic_id
);
1953 HWRM_PREP(&req
, HWRM_VNIC_QCFG
, BNXT_USE_CHIMP_MB
);
1956 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID
);
1957 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
1958 req
.vf_id
= rte_cpu_to_le_16(fw_vf_id
);
1960 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1962 HWRM_CHECK_RESULT();
1964 vnic
->dflt_ring_grp
= rte_le_to_cpu_16(resp
->dflt_ring_grp
);
1965 vnic
->rss_rule
= rte_le_to_cpu_16(resp
->rss_rule
);
1966 vnic
->cos_rule
= rte_le_to_cpu_16(resp
->cos_rule
);
1967 vnic
->lb_rule
= rte_le_to_cpu_16(resp
->lb_rule
);
1968 vnic
->mru
= rte_le_to_cpu_16(resp
->mru
);
1969 vnic
->func_default
= rte_le_to_cpu_32(
1970 resp
->flags
) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT
;
1971 vnic
->vlan_strip
= rte_le_to_cpu_32(resp
->flags
) &
1972 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE
;
1973 vnic
->bd_stall
= rte_le_to_cpu_32(resp
->flags
) &
1974 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE
;
1975 vnic
->roce_dual
= rte_le_to_cpu_32(resp
->flags
) &
1976 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE
;
1977 vnic
->roce_only
= rte_le_to_cpu_32(resp
->flags
) &
1978 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE
;
1979 vnic
->rss_dflt_cr
= rte_le_to_cpu_32(resp
->flags
) &
1980 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE
;
1987 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt
*bp
,
1988 struct bnxt_vnic_info
*vnic
, uint16_t ctx_idx
)
1992 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req
= {.req_type
= 0 };
1993 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output
*resp
=
1994 bp
->hwrm_cmd_resp_addr
;
1996 HWRM_PREP(&req
, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC
, BNXT_USE_CHIMP_MB
);
1998 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
1999 HWRM_CHECK_RESULT();
2001 ctx_id
= rte_le_to_cpu_16(resp
->rss_cos_lb_ctx_id
);
2002 if (!BNXT_HAS_RING_GRPS(bp
))
2003 vnic
->fw_grp_ids
[ctx_idx
] = ctx_id
;
2004 else if (ctx_idx
== 0)
2005 vnic
->rss_rule
= ctx_id
;
2013 int _bnxt_hwrm_vnic_ctx_free(struct bnxt
*bp
,
2014 struct bnxt_vnic_info
*vnic
, uint16_t ctx_idx
)
2017 struct hwrm_vnic_rss_cos_lb_ctx_free_input req
= {.req_type
= 0 };
2018 struct hwrm_vnic_rss_cos_lb_ctx_free_output
*resp
=
2019 bp
->hwrm_cmd_resp_addr
;
2021 if (ctx_idx
== (uint16_t)HWRM_NA_SIGNATURE
) {
2022 PMD_DRV_LOG(DEBUG
, "VNIC RSS Rule %x\n", vnic
->rss_rule
);
2025 HWRM_PREP(&req
, HWRM_VNIC_RSS_COS_LB_CTX_FREE
, BNXT_USE_CHIMP_MB
);
2027 req
.rss_cos_lb_ctx_id
= rte_cpu_to_le_16(ctx_idx
);
2029 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2031 HWRM_CHECK_RESULT();
2037 int bnxt_hwrm_vnic_ctx_free(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2041 if (BNXT_CHIP_THOR(bp
)) {
2044 for (j
= 0; j
< vnic
->num_lb_ctxts
; j
++) {
2045 rc
= _bnxt_hwrm_vnic_ctx_free(bp
,
2047 vnic
->fw_grp_ids
[j
]);
2048 vnic
->fw_grp_ids
[j
] = INVALID_HW_RING_ID
;
2050 vnic
->num_lb_ctxts
= 0;
2052 rc
= _bnxt_hwrm_vnic_ctx_free(bp
, vnic
, vnic
->rss_rule
);
2053 vnic
->rss_rule
= INVALID_HW_RING_ID
;
2059 int bnxt_hwrm_vnic_free(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2062 struct hwrm_vnic_free_input req
= {.req_type
= 0 };
2063 struct hwrm_vnic_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2065 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
2066 PMD_DRV_LOG(DEBUG
, "VNIC FREE ID %x\n", vnic
->fw_vnic_id
);
2070 HWRM_PREP(&req
, HWRM_VNIC_FREE
, BNXT_USE_CHIMP_MB
);
2072 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
2074 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2076 HWRM_CHECK_RESULT();
2079 vnic
->fw_vnic_id
= INVALID_HW_RING_ID
;
2080 /* Configure default VNIC again if necessary. */
2081 if (vnic
->func_default
&& (bp
->flags
& BNXT_FLAG_DFLT_VNIC_SET
))
2082 bp
->flags
&= ~BNXT_FLAG_DFLT_VNIC_SET
;
2088 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2092 int nr_ctxs
= vnic
->num_lb_ctxts
;
2093 struct hwrm_vnic_rss_cfg_input req
= {.req_type
= 0 };
2094 struct hwrm_vnic_rss_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2096 for (i
= 0; i
< nr_ctxs
; i
++) {
2097 HWRM_PREP(&req
, HWRM_VNIC_RSS_CFG
, BNXT_USE_CHIMP_MB
);
2099 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
2100 req
.hash_type
= rte_cpu_to_le_32(vnic
->hash_type
);
2101 req
.hash_mode_flags
= vnic
->hash_mode
;
2103 req
.hash_key_tbl_addr
=
2104 rte_cpu_to_le_64(vnic
->rss_hash_key_dma_addr
);
2106 req
.ring_grp_tbl_addr
=
2107 rte_cpu_to_le_64(vnic
->rss_table_dma_addr
+
2108 i
* HW_HASH_INDEX_SIZE
);
2109 req
.ring_table_pair_index
= i
;
2110 req
.rss_ctx_idx
= rte_cpu_to_le_16(vnic
->fw_grp_ids
[i
]);
2112 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
),
2115 HWRM_CHECK_RESULT();
2122 int bnxt_hwrm_vnic_rss_cfg(struct bnxt
*bp
,
2123 struct bnxt_vnic_info
*vnic
)
2126 struct hwrm_vnic_rss_cfg_input req
= {.req_type
= 0 };
2127 struct hwrm_vnic_rss_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2129 if (!vnic
->rss_table
)
2132 if (BNXT_CHIP_THOR(bp
))
2133 return bnxt_hwrm_vnic_rss_cfg_thor(bp
, vnic
);
2135 HWRM_PREP(&req
, HWRM_VNIC_RSS_CFG
, BNXT_USE_CHIMP_MB
);
2137 req
.hash_type
= rte_cpu_to_le_32(vnic
->hash_type
);
2138 req
.hash_mode_flags
= vnic
->hash_mode
;
2140 req
.ring_grp_tbl_addr
=
2141 rte_cpu_to_le_64(vnic
->rss_table_dma_addr
);
2142 req
.hash_key_tbl_addr
=
2143 rte_cpu_to_le_64(vnic
->rss_hash_key_dma_addr
);
2144 req
.rss_ctx_idx
= rte_cpu_to_le_16(vnic
->rss_rule
);
2145 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
2147 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2149 HWRM_CHECK_RESULT();
2155 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt
*bp
,
2156 struct bnxt_vnic_info
*vnic
)
2159 struct hwrm_vnic_plcmodes_cfg_input req
= {.req_type
= 0 };
2160 struct hwrm_vnic_plcmodes_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2163 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
2164 PMD_DRV_LOG(DEBUG
, "VNIC ID %x\n", vnic
->fw_vnic_id
);
2168 HWRM_PREP(&req
, HWRM_VNIC_PLCMODES_CFG
, BNXT_USE_CHIMP_MB
);
2170 req
.flags
= rte_cpu_to_le_32(
2171 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT
);
2173 req
.enables
= rte_cpu_to_le_32(
2174 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
);
2176 size
= rte_pktmbuf_data_room_size(bp
->rx_queues
[0]->mb_pool
);
2177 size
-= RTE_PKTMBUF_HEADROOM
;
2178 size
= RTE_MIN(BNXT_MAX_PKT_LEN
, size
);
2180 req
.jumbo_thresh
= rte_cpu_to_le_16(size
);
2181 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
2183 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2185 HWRM_CHECK_RESULT();
2191 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt
*bp
,
2192 struct bnxt_vnic_info
*vnic
, bool enable
)
2195 struct hwrm_vnic_tpa_cfg_input req
= {.req_type
= 0 };
2196 struct hwrm_vnic_tpa_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2198 if (BNXT_CHIP_THOR(bp
) && !bp
->max_tpa_v2
) {
2200 PMD_DRV_LOG(ERR
, "No HW support for LRO\n");
2204 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
) {
2205 PMD_DRV_LOG(DEBUG
, "Invalid vNIC ID\n");
2209 HWRM_PREP(&req
, HWRM_VNIC_TPA_CFG
, BNXT_USE_CHIMP_MB
);
2212 req
.enables
= rte_cpu_to_le_32(
2213 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS
|
2214 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS
|
2215 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN
);
2216 req
.flags
= rte_cpu_to_le_32(
2217 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA
|
2218 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA
|
2219 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE
|
2220 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO
|
2221 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN
|
2222 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ
);
2223 req
.max_agg_segs
= rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp
));
2224 req
.max_aggs
= rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp
));
2225 req
.min_agg_len
= rte_cpu_to_le_32(512);
2227 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
2229 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2231 HWRM_CHECK_RESULT();
2237 int bnxt_hwrm_func_vf_mac(struct bnxt
*bp
, uint16_t vf
, const uint8_t *mac_addr
)
2239 struct hwrm_func_cfg_input req
= {0};
2240 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2243 req
.flags
= rte_cpu_to_le_32(bp
->pf
->vf_info
[vf
].func_cfg_flags
);
2244 req
.enables
= rte_cpu_to_le_32(
2245 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR
);
2246 memcpy(req
.dflt_mac_addr
, mac_addr
, sizeof(req
.dflt_mac_addr
));
2247 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
2249 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
2251 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2252 HWRM_CHECK_RESULT();
2255 bp
->pf
->vf_info
[vf
].random_mac
= false;
2260 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt
*bp
, uint16_t fid
,
2264 struct hwrm_func_qstats_input req
= {.req_type
= 0};
2265 struct hwrm_func_qstats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2267 HWRM_PREP(&req
, HWRM_FUNC_QSTATS
, BNXT_USE_CHIMP_MB
);
2269 req
.fid
= rte_cpu_to_le_16(fid
);
2271 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2273 HWRM_CHECK_RESULT();
2276 *dropped
= rte_le_to_cpu_64(resp
->tx_drop_pkts
);
2283 int bnxt_hwrm_func_qstats(struct bnxt
*bp
, uint16_t fid
,
2284 struct rte_eth_stats
*stats
,
2285 struct hwrm_func_qstats_output
*func_qstats
)
2288 struct hwrm_func_qstats_input req
= {.req_type
= 0};
2289 struct hwrm_func_qstats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2291 HWRM_PREP(&req
, HWRM_FUNC_QSTATS
, BNXT_USE_CHIMP_MB
);
2293 req
.fid
= rte_cpu_to_le_16(fid
);
2295 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2297 HWRM_CHECK_RESULT();
2299 memcpy(func_qstats
, resp
,
2300 sizeof(struct hwrm_func_qstats_output
));
2305 stats
->ipackets
= rte_le_to_cpu_64(resp
->rx_ucast_pkts
);
2306 stats
->ipackets
+= rte_le_to_cpu_64(resp
->rx_mcast_pkts
);
2307 stats
->ipackets
+= rte_le_to_cpu_64(resp
->rx_bcast_pkts
);
2308 stats
->ibytes
= rte_le_to_cpu_64(resp
->rx_ucast_bytes
);
2309 stats
->ibytes
+= rte_le_to_cpu_64(resp
->rx_mcast_bytes
);
2310 stats
->ibytes
+= rte_le_to_cpu_64(resp
->rx_bcast_bytes
);
2312 stats
->opackets
= rte_le_to_cpu_64(resp
->tx_ucast_pkts
);
2313 stats
->opackets
+= rte_le_to_cpu_64(resp
->tx_mcast_pkts
);
2314 stats
->opackets
+= rte_le_to_cpu_64(resp
->tx_bcast_pkts
);
2315 stats
->obytes
= rte_le_to_cpu_64(resp
->tx_ucast_bytes
);
2316 stats
->obytes
+= rte_le_to_cpu_64(resp
->tx_mcast_bytes
);
2317 stats
->obytes
+= rte_le_to_cpu_64(resp
->tx_bcast_bytes
);
2319 stats
->imissed
= rte_le_to_cpu_64(resp
->rx_discard_pkts
);
2320 stats
->ierrors
= rte_le_to_cpu_64(resp
->rx_drop_pkts
);
2321 stats
->oerrors
= rte_le_to_cpu_64(resp
->tx_discard_pkts
);
2329 int bnxt_hwrm_func_clr_stats(struct bnxt
*bp
, uint16_t fid
)
2332 struct hwrm_func_clr_stats_input req
= {.req_type
= 0};
2333 struct hwrm_func_clr_stats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
2335 HWRM_PREP(&req
, HWRM_FUNC_CLR_STATS
, BNXT_USE_CHIMP_MB
);
2337 req
.fid
= rte_cpu_to_le_16(fid
);
2339 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
2341 HWRM_CHECK_RESULT();
2347 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt
*bp
)
2352 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
2353 struct bnxt_tx_queue
*txq
;
2354 struct bnxt_rx_queue
*rxq
;
2355 struct bnxt_cp_ring_info
*cpr
;
2357 if (i
>= bp
->rx_cp_nr_rings
) {
2358 txq
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
];
2361 rxq
= bp
->rx_queues
[i
];
2365 rc
= bnxt_hwrm_stat_clear(bp
, cpr
);
2373 bnxt_free_all_hwrm_stat_ctxs(struct bnxt
*bp
)
2377 struct bnxt_cp_ring_info
*cpr
;
2379 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
2381 if (i
>= bp
->rx_cp_nr_rings
) {
2382 cpr
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
]->cp_ring
;
2384 cpr
= bp
->rx_queues
[i
]->cp_ring
;
2385 if (BNXT_HAS_RING_GRPS(bp
))
2386 bp
->grp_info
[i
].fw_stats_ctx
= -1;
2388 if (cpr
->hw_stats_ctx_id
!= HWRM_NA_SIGNATURE
) {
2389 rc
= bnxt_hwrm_stat_ctx_free(bp
, cpr
, i
);
2390 cpr
->hw_stats_ctx_id
= HWRM_NA_SIGNATURE
;
2398 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt
*bp
)
2403 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
2404 struct bnxt_tx_queue
*txq
;
2405 struct bnxt_rx_queue
*rxq
;
2406 struct bnxt_cp_ring_info
*cpr
;
2408 if (i
>= bp
->rx_cp_nr_rings
) {
2409 txq
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
];
2412 rxq
= bp
->rx_queues
[i
];
2416 rc
= bnxt_hwrm_stat_ctx_alloc(bp
, cpr
, i
);
2425 bnxt_free_all_hwrm_ring_grps(struct bnxt
*bp
)
2430 if (!BNXT_HAS_RING_GRPS(bp
))
2433 for (idx
= 0; idx
< bp
->rx_cp_nr_rings
; idx
++) {
2435 if (bp
->grp_info
[idx
].fw_grp_id
== INVALID_HW_RING_ID
)
2438 rc
= bnxt_hwrm_ring_grp_free(bp
, idx
);
2446 void bnxt_free_nq_ring(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
)
2448 struct bnxt_ring
*cp_ring
= cpr
->cp_ring_struct
;
2450 bnxt_hwrm_ring_free(bp
, cp_ring
,
2451 HWRM_RING_FREE_INPUT_RING_TYPE_NQ
);
2452 cp_ring
->fw_ring_id
= INVALID_HW_RING_ID
;
2453 memset(cpr
->cp_desc_ring
, 0, cpr
->cp_ring_struct
->ring_size
*
2454 sizeof(*cpr
->cp_desc_ring
));
2455 cpr
->cp_raw_cons
= 0;
2459 void bnxt_free_cp_ring(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
)
2461 struct bnxt_ring
*cp_ring
= cpr
->cp_ring_struct
;
2463 bnxt_hwrm_ring_free(bp
, cp_ring
,
2464 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL
);
2465 cp_ring
->fw_ring_id
= INVALID_HW_RING_ID
;
2466 memset(cpr
->cp_desc_ring
, 0, cpr
->cp_ring_struct
->ring_size
*
2467 sizeof(*cpr
->cp_desc_ring
));
2468 cpr
->cp_raw_cons
= 0;
2472 void bnxt_free_hwrm_rx_ring(struct bnxt
*bp
, int queue_index
)
2474 struct bnxt_rx_queue
*rxq
= bp
->rx_queues
[queue_index
];
2475 struct bnxt_rx_ring_info
*rxr
= rxq
->rx_ring
;
2476 struct bnxt_ring
*ring
= rxr
->rx_ring_struct
;
2477 struct bnxt_cp_ring_info
*cpr
= rxq
->cp_ring
;
2479 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
2480 bnxt_hwrm_ring_free(bp
, ring
,
2481 HWRM_RING_FREE_INPUT_RING_TYPE_RX
);
2482 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
2483 if (BNXT_HAS_RING_GRPS(bp
))
2484 bp
->grp_info
[queue_index
].rx_fw_ring_id
=
2487 ring
= rxr
->ag_ring_struct
;
2488 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
2489 bnxt_hwrm_ring_free(bp
, ring
,
2490 BNXT_CHIP_THOR(bp
) ?
2491 HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG
:
2492 HWRM_RING_FREE_INPUT_RING_TYPE_RX
);
2493 if (BNXT_HAS_RING_GRPS(bp
))
2494 bp
->grp_info
[queue_index
].ag_fw_ring_id
=
2497 if (cpr
->cp_ring_struct
->fw_ring_id
!= INVALID_HW_RING_ID
)
2498 bnxt_free_cp_ring(bp
, cpr
);
2500 if (BNXT_HAS_RING_GRPS(bp
))
2501 bp
->grp_info
[queue_index
].cp_fw_ring_id
= INVALID_HW_RING_ID
;
2505 bnxt_free_all_hwrm_rings(struct bnxt
*bp
)
2509 for (i
= 0; i
< bp
->tx_cp_nr_rings
; i
++) {
2510 struct bnxt_tx_queue
*txq
= bp
->tx_queues
[i
];
2511 struct bnxt_tx_ring_info
*txr
= txq
->tx_ring
;
2512 struct bnxt_ring
*ring
= txr
->tx_ring_struct
;
2513 struct bnxt_cp_ring_info
*cpr
= txq
->cp_ring
;
2515 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
2516 bnxt_hwrm_ring_free(bp
, ring
,
2517 HWRM_RING_FREE_INPUT_RING_TYPE_TX
);
2518 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
2519 memset(txr
->tx_desc_ring
, 0,
2520 txr
->tx_ring_struct
->ring_size
*
2521 sizeof(*txr
->tx_desc_ring
));
2522 memset(txr
->tx_buf_ring
, 0,
2523 txr
->tx_ring_struct
->ring_size
*
2524 sizeof(*txr
->tx_buf_ring
));
2528 if (cpr
->cp_ring_struct
->fw_ring_id
!= INVALID_HW_RING_ID
) {
2529 bnxt_free_cp_ring(bp
, cpr
);
2530 cpr
->cp_ring_struct
->fw_ring_id
= INVALID_HW_RING_ID
;
2534 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++)
2535 bnxt_free_hwrm_rx_ring(bp
, i
);
2540 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt
*bp
)
2545 if (!BNXT_HAS_RING_GRPS(bp
))
2548 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++) {
2549 rc
= bnxt_hwrm_ring_grp_alloc(bp
, i
);
2557 * HWRM utility functions
2560 void bnxt_free_hwrm_resources(struct bnxt
*bp
)
2562 /* Release memzone */
2563 rte_free(bp
->hwrm_cmd_resp_addr
);
2564 rte_free(bp
->hwrm_short_cmd_req_addr
);
2565 bp
->hwrm_cmd_resp_addr
= NULL
;
2566 bp
->hwrm_short_cmd_req_addr
= NULL
;
2567 bp
->hwrm_cmd_resp_dma_addr
= 0;
2568 bp
->hwrm_short_cmd_req_dma_addr
= 0;
2571 int bnxt_alloc_hwrm_resources(struct bnxt
*bp
)
2573 struct rte_pci_device
*pdev
= bp
->pdev
;
2574 char type
[RTE_MEMZONE_NAMESIZE
];
2576 sprintf(type
, "bnxt_hwrm_" PCI_PRI_FMT
, pdev
->addr
.domain
,
2577 pdev
->addr
.bus
, pdev
->addr
.devid
, pdev
->addr
.function
);
2578 bp
->max_resp_len
= HWRM_MAX_RESP_LEN
;
2579 bp
->hwrm_cmd_resp_addr
= rte_malloc(type
, bp
->max_resp_len
, 0);
2580 if (bp
->hwrm_cmd_resp_addr
== NULL
)
2582 bp
->hwrm_cmd_resp_dma_addr
=
2583 rte_malloc_virt2iova(bp
->hwrm_cmd_resp_addr
);
2584 if (bp
->hwrm_cmd_resp_dma_addr
== RTE_BAD_IOVA
) {
2586 "unable to map response address to physical memory\n");
2589 rte_spinlock_init(&bp
->hwrm_lock
);
2595 bnxt_clear_hwrm_vnic_filters(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2597 struct bnxt_filter_info
*filter
;
2600 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
2601 if (filter
->filter_type
== HWRM_CFA_EM_FILTER
)
2602 rc
= bnxt_hwrm_clear_em_filter(bp
, filter
);
2603 else if (filter
->filter_type
== HWRM_CFA_NTUPLE_FILTER
)
2604 rc
= bnxt_hwrm_clear_ntuple_filter(bp
, filter
);
2605 rc
= bnxt_hwrm_clear_l2_filter(bp
, filter
);
2606 STAILQ_REMOVE(&vnic
->filter
, filter
, bnxt_filter_info
, next
);
2607 bnxt_free_filter(bp
, filter
);
2613 bnxt_clear_hwrm_vnic_flows(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2615 struct bnxt_filter_info
*filter
;
2616 struct rte_flow
*flow
;
2619 while (!STAILQ_EMPTY(&vnic
->flow_list
)) {
2620 flow
= STAILQ_FIRST(&vnic
->flow_list
);
2621 filter
= flow
->filter
;
2622 PMD_DRV_LOG(DEBUG
, "filter type %d\n", filter
->filter_type
);
2623 if (filter
->filter_type
== HWRM_CFA_EM_FILTER
)
2624 rc
= bnxt_hwrm_clear_em_filter(bp
, filter
);
2625 else if (filter
->filter_type
== HWRM_CFA_NTUPLE_FILTER
)
2626 rc
= bnxt_hwrm_clear_ntuple_filter(bp
, filter
);
2627 rc
= bnxt_hwrm_clear_l2_filter(bp
, filter
);
2629 STAILQ_REMOVE(&vnic
->flow_list
, flow
, rte_flow
, next
);
2635 int bnxt_set_hwrm_vnic_filters(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
2637 struct bnxt_filter_info
*filter
;
2640 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
2641 if (filter
->filter_type
== HWRM_CFA_EM_FILTER
)
2642 rc
= bnxt_hwrm_set_em_filter(bp
, filter
->dst_id
,
2644 else if (filter
->filter_type
== HWRM_CFA_NTUPLE_FILTER
)
2645 rc
= bnxt_hwrm_set_ntuple_filter(bp
, filter
->dst_id
,
2648 rc
= bnxt_hwrm_set_l2_filter(bp
, vnic
->fw_vnic_id
,
2657 bnxt_free_tunnel_ports(struct bnxt
*bp
)
2659 if (bp
->vxlan_port_cnt
)
2660 bnxt_hwrm_tunnel_dst_port_free(bp
, bp
->vxlan_fw_dst_port_id
,
2661 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN
);
2663 if (bp
->geneve_port_cnt
)
2664 bnxt_hwrm_tunnel_dst_port_free(bp
, bp
->geneve_fw_dst_port_id
,
2665 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE
);
2666 bp
->geneve_port
= 0;
2669 void bnxt_free_all_hwrm_resources(struct bnxt
*bp
)
2673 if (bp
->vnic_info
== NULL
)
2677 * Cleanup VNICs in reverse order, to make sure the L2 filter
2678 * from vnic0 is last to be cleaned up.
2680 for (i
= bp
->max_vnics
- 1; i
>= 0; i
--) {
2681 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
2683 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
)
2686 bnxt_clear_hwrm_vnic_flows(bp
, vnic
);
2688 bnxt_clear_hwrm_vnic_filters(bp
, vnic
);
2690 bnxt_hwrm_vnic_ctx_free(bp
, vnic
);
2692 bnxt_hwrm_vnic_tpa_cfg(bp
, vnic
, false);
2694 bnxt_hwrm_vnic_free(bp
, vnic
);
2696 rte_free(vnic
->fw_grp_ids
);
2698 /* Ring resources */
2699 bnxt_free_all_hwrm_rings(bp
);
2700 bnxt_free_all_hwrm_ring_grps(bp
);
2701 bnxt_free_all_hwrm_stat_ctxs(bp
);
2702 bnxt_free_tunnel_ports(bp
);
2705 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed
)
2707 uint8_t hw_link_duplex
= HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
;
2709 if ((conf_link_speed
& ETH_LINK_SPEED_FIXED
) == ETH_LINK_SPEED_AUTONEG
)
2710 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
;
2712 switch (conf_link_speed
) {
2713 case ETH_LINK_SPEED_10M_HD
:
2714 case ETH_LINK_SPEED_100M_HD
:
2716 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF
;
2718 return hw_link_duplex
;
2721 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link
)
2723 return (conf_link
& ETH_LINK_SPEED_FIXED
) ? 0 : 1;
2726 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed
)
2728 uint16_t eth_link_speed
= 0;
2730 if (conf_link_speed
== ETH_LINK_SPEED_AUTONEG
)
2731 return ETH_LINK_SPEED_AUTONEG
;
2733 switch (conf_link_speed
& ~ETH_LINK_SPEED_FIXED
) {
2734 case ETH_LINK_SPEED_100M
:
2735 case ETH_LINK_SPEED_100M_HD
:
2738 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB
;
2740 case ETH_LINK_SPEED_1G
:
2742 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB
;
2744 case ETH_LINK_SPEED_2_5G
:
2746 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB
;
2748 case ETH_LINK_SPEED_10G
:
2750 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB
;
2752 case ETH_LINK_SPEED_20G
:
2754 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB
;
2756 case ETH_LINK_SPEED_25G
:
2758 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB
;
2760 case ETH_LINK_SPEED_40G
:
2762 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB
;
2764 case ETH_LINK_SPEED_50G
:
2766 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB
;
2768 case ETH_LINK_SPEED_100G
:
2770 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB
;
2772 case ETH_LINK_SPEED_200G
:
2774 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB
;
2778 "Unsupported link speed %d; default to AUTO\n",
2782 return eth_link_speed
;
2785 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2786 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2787 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2788 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2789 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2791 static int bnxt_valid_link_speed(uint32_t link_speed
, uint16_t port_id
)
2795 if (link_speed
== ETH_LINK_SPEED_AUTONEG
)
2798 if (link_speed
& ETH_LINK_SPEED_FIXED
) {
2799 one_speed
= link_speed
& ~ETH_LINK_SPEED_FIXED
;
2801 if (one_speed
& (one_speed
- 1)) {
2803 "Invalid advertised speeds (%u) for port %u\n",
2804 link_speed
, port_id
);
2807 if ((one_speed
& BNXT_SUPPORTED_SPEEDS
) != one_speed
) {
2809 "Unsupported advertised speed (%u) for port %u\n",
2810 link_speed
, port_id
);
2814 if (!(link_speed
& BNXT_SUPPORTED_SPEEDS
)) {
2816 "Unsupported advertised speeds (%u) for port %u\n",
2817 link_speed
, port_id
);
2825 bnxt_parse_eth_link_speed_mask(struct bnxt
*bp
, uint32_t link_speed
)
2829 if (link_speed
== ETH_LINK_SPEED_AUTONEG
) {
2830 if (bp
->link_info
->support_speeds
)
2831 return bp
->link_info
->support_speeds
;
2832 link_speed
= BNXT_SUPPORTED_SPEEDS
;
2835 if (link_speed
& ETH_LINK_SPEED_100M
)
2836 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB
;
2837 if (link_speed
& ETH_LINK_SPEED_100M_HD
)
2838 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB
;
2839 if (link_speed
& ETH_LINK_SPEED_1G
)
2840 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB
;
2841 if (link_speed
& ETH_LINK_SPEED_2_5G
)
2842 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB
;
2843 if (link_speed
& ETH_LINK_SPEED_10G
)
2844 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB
;
2845 if (link_speed
& ETH_LINK_SPEED_20G
)
2846 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB
;
2847 if (link_speed
& ETH_LINK_SPEED_25G
)
2848 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB
;
2849 if (link_speed
& ETH_LINK_SPEED_40G
)
2850 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB
;
2851 if (link_speed
& ETH_LINK_SPEED_50G
)
2852 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB
;
2853 if (link_speed
& ETH_LINK_SPEED_100G
)
2854 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB
;
2855 if (link_speed
& ETH_LINK_SPEED_200G
)
2856 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB
;
2860 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed
)
2862 uint32_t eth_link_speed
= ETH_SPEED_NUM_NONE
;
2864 switch (hw_link_speed
) {
2865 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB
:
2866 eth_link_speed
= ETH_SPEED_NUM_100M
;
2868 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB
:
2869 eth_link_speed
= ETH_SPEED_NUM_1G
;
2871 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB
:
2872 eth_link_speed
= ETH_SPEED_NUM_2_5G
;
2874 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB
:
2875 eth_link_speed
= ETH_SPEED_NUM_10G
;
2877 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB
:
2878 eth_link_speed
= ETH_SPEED_NUM_20G
;
2880 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB
:
2881 eth_link_speed
= ETH_SPEED_NUM_25G
;
2883 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB
:
2884 eth_link_speed
= ETH_SPEED_NUM_40G
;
2886 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB
:
2887 eth_link_speed
= ETH_SPEED_NUM_50G
;
2889 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB
:
2890 eth_link_speed
= ETH_SPEED_NUM_100G
;
2892 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB
:
2893 eth_link_speed
= ETH_SPEED_NUM_200G
;
2895 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB
:
2897 PMD_DRV_LOG(ERR
, "HWRM link speed %d not defined\n",
2901 return eth_link_speed
;
2904 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex
)
2906 uint16_t eth_link_duplex
= ETH_LINK_FULL_DUPLEX
;
2908 switch (hw_link_duplex
) {
2909 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
:
2910 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL
:
2912 eth_link_duplex
= ETH_LINK_FULL_DUPLEX
;
2914 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF
:
2915 eth_link_duplex
= ETH_LINK_HALF_DUPLEX
;
2918 PMD_DRV_LOG(ERR
, "HWRM link duplex %d not defined\n",
2922 return eth_link_duplex
;
2925 int bnxt_get_hwrm_link_config(struct bnxt
*bp
, struct rte_eth_link
*link
)
2928 struct bnxt_link_info
*link_info
= bp
->link_info
;
2930 rc
= bnxt_hwrm_port_phy_qcfg(bp
, link_info
);
2933 "Get link config failed with rc %d\n", rc
);
2936 if (link_info
->link_speed
)
2938 bnxt_parse_hw_link_speed(link_info
->link_speed
);
2940 link
->link_speed
= ETH_SPEED_NUM_NONE
;
2941 link
->link_duplex
= bnxt_parse_hw_link_duplex(link_info
->duplex
);
2942 link
->link_status
= link_info
->link_up
;
2943 link
->link_autoneg
= link_info
->auto_mode
==
2944 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE
?
2945 ETH_LINK_FIXED
: ETH_LINK_AUTONEG
;
2950 int bnxt_set_hwrm_link_config(struct bnxt
*bp
, bool link_up
)
2953 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
2954 struct bnxt_link_info link_req
;
2955 uint16_t speed
, autoneg
;
2957 if (!BNXT_SINGLE_PF(bp
) || BNXT_VF(bp
))
2960 rc
= bnxt_valid_link_speed(dev_conf
->link_speeds
,
2961 bp
->eth_dev
->data
->port_id
);
2965 memset(&link_req
, 0, sizeof(link_req
));
2966 link_req
.link_up
= link_up
;
2970 autoneg
= bnxt_check_eth_link_autoneg(dev_conf
->link_speeds
);
2971 if (BNXT_CHIP_THOR(bp
) &&
2972 dev_conf
->link_speeds
== ETH_LINK_SPEED_40G
) {
2973 /* 40G is not supported as part of media auto detect.
2974 * The speed should be forced and autoneg disabled
2975 * to configure 40G speed.
2977 PMD_DRV_LOG(INFO
, "Disabling autoneg for 40G\n");
2981 speed
= bnxt_parse_eth_link_speed(dev_conf
->link_speeds
);
2982 link_req
.phy_flags
= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY
;
2983 /* Autoneg can be done only when the FW allows.
2984 * When user configures fixed speed of 40G and later changes to
2985 * any other speed, auto_link_speed/force_link_speed is still set
2986 * to 40G until link comes up at new speed.
2989 !(!BNXT_CHIP_THOR(bp
) &&
2990 (bp
->link_info
->auto_link_speed
||
2991 bp
->link_info
->force_link_speed
))) {
2992 link_req
.phy_flags
|=
2993 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG
;
2994 link_req
.auto_link_speed_mask
=
2995 bnxt_parse_eth_link_speed_mask(bp
,
2996 dev_conf
->link_speeds
);
2998 if (bp
->link_info
->phy_type
==
2999 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET
||
3000 bp
->link_info
->phy_type
==
3001 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE
||
3002 bp
->link_info
->media_type
==
3003 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP
) {
3004 PMD_DRV_LOG(ERR
, "10GBase-T devices must autoneg\n");
3008 link_req
.phy_flags
|= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE
;
3009 /* If user wants a particular speed try that first. */
3011 link_req
.link_speed
= speed
;
3012 else if (bp
->link_info
->force_link_speed
)
3013 link_req
.link_speed
= bp
->link_info
->force_link_speed
;
3015 link_req
.link_speed
= bp
->link_info
->auto_link_speed
;
3017 link_req
.duplex
= bnxt_parse_eth_link_duplex(dev_conf
->link_speeds
);
3018 link_req
.auto_pause
= bp
->link_info
->auto_pause
;
3019 link_req
.force_pause
= bp
->link_info
->force_pause
;
3022 rc
= bnxt_hwrm_port_phy_cfg(bp
, &link_req
);
3025 "Set link config failed with rc %d\n", rc
);
3033 int bnxt_hwrm_func_qcfg(struct bnxt
*bp
, uint16_t *mtu
)
3035 struct hwrm_func_qcfg_input req
= {0};
3036 struct hwrm_func_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3039 bp
->func_svif
= BNXT_SVIF_INVALID
;
3042 HWRM_PREP(&req
, HWRM_FUNC_QCFG
, BNXT_USE_CHIMP_MB
);
3043 req
.fid
= rte_cpu_to_le_16(0xffff);
3045 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3047 HWRM_CHECK_RESULT();
3049 /* Hard Coded.. 0xfff VLAN ID mask */
3050 bp
->vlan
= rte_le_to_cpu_16(resp
->vlan
) & 0xfff;
3052 svif_info
= rte_le_to_cpu_16(resp
->svif_info
);
3053 if (svif_info
& HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID
)
3054 bp
->func_svif
= svif_info
&
3055 HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK
;
3057 flags
= rte_le_to_cpu_16(resp
->flags
);
3058 if (BNXT_PF(bp
) && (flags
& HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST
))
3059 bp
->flags
|= BNXT_FLAG_MULTI_HOST
;
3062 !BNXT_VF_IS_TRUSTED(bp
) &&
3063 (flags
& HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF
)) {
3064 bp
->flags
|= BNXT_FLAG_TRUSTED_VF_EN
;
3065 PMD_DRV_LOG(INFO
, "Trusted VF cap enabled\n");
3066 } else if (BNXT_VF(bp
) &&
3067 BNXT_VF_IS_TRUSTED(bp
) &&
3068 !(flags
& HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF
)) {
3069 bp
->flags
&= ~BNXT_FLAG_TRUSTED_VF_EN
;
3070 PMD_DRV_LOG(INFO
, "Trusted VF cap disabled\n");
3074 *mtu
= rte_le_to_cpu_16(resp
->mtu
);
3076 switch (resp
->port_partition_type
) {
3077 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0
:
3078 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5
:
3079 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0
:
3081 bp
->flags
|= BNXT_FLAG_NPAR_PF
;
3084 bp
->flags
&= ~BNXT_FLAG_NPAR_PF
;
3093 int bnxt_hwrm_port_mac_qcfg(struct bnxt
*bp
)
3095 struct hwrm_port_mac_qcfg_input req
= {0};
3096 struct hwrm_port_mac_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3097 uint16_t port_svif_info
;
3100 bp
->port_svif
= BNXT_SVIF_INVALID
;
3105 HWRM_PREP(&req
, HWRM_PORT_MAC_QCFG
, BNXT_USE_CHIMP_MB
);
3107 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3109 HWRM_CHECK_RESULT();
3111 port_svif_info
= rte_le_to_cpu_16(resp
->port_svif_info
);
3112 if (port_svif_info
&
3113 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID
)
3114 bp
->port_svif
= port_svif_info
&
3115 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK
;
3122 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input
*fcfg
,
3123 struct hwrm_func_qcaps_output
*qcaps
)
3125 qcaps
->max_rsscos_ctx
= fcfg
->num_rsscos_ctxs
;
3126 memcpy(qcaps
->mac_address
, fcfg
->dflt_mac_addr
,
3127 sizeof(qcaps
->mac_address
));
3128 qcaps
->max_l2_ctxs
= fcfg
->num_l2_ctxs
;
3129 qcaps
->max_rx_rings
= fcfg
->num_rx_rings
;
3130 qcaps
->max_tx_rings
= fcfg
->num_tx_rings
;
3131 qcaps
->max_cmpl_rings
= fcfg
->num_cmpl_rings
;
3132 qcaps
->max_stat_ctx
= fcfg
->num_stat_ctxs
;
3134 qcaps
->first_vf_id
= 0;
3135 qcaps
->max_vnics
= fcfg
->num_vnics
;
3136 qcaps
->max_decap_records
= 0;
3137 qcaps
->max_encap_records
= 0;
3138 qcaps
->max_tx_wm_flows
= 0;
3139 qcaps
->max_tx_em_flows
= 0;
3140 qcaps
->max_rx_wm_flows
= 0;
3141 qcaps
->max_rx_em_flows
= 0;
3142 qcaps
->max_flow_id
= 0;
3143 qcaps
->max_mcast_filters
= fcfg
->num_mcast_filters
;
3144 qcaps
->max_sp_tx_rings
= 0;
3145 qcaps
->max_hw_ring_grps
= fcfg
->num_hw_ring_grps
;
3148 static int bnxt_hwrm_pf_func_cfg(struct bnxt
*bp
, int tx_rings
)
3150 struct hwrm_func_cfg_input req
= {0};
3151 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3155 enables
= HWRM_FUNC_CFG_INPUT_ENABLES_MTU
|
3156 HWRM_FUNC_CFG_INPUT_ENABLES_MRU
|
3157 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS
|
3158 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS
|
3159 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS
|
3160 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS
|
3161 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS
|
3162 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS
|
3163 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS
;
3165 if (BNXT_HAS_RING_GRPS(bp
)) {
3166 enables
|= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS
;
3167 req
.num_hw_ring_grps
= rte_cpu_to_le_16(bp
->max_ring_grps
);
3168 } else if (BNXT_HAS_NQ(bp
)) {
3169 enables
|= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX
;
3170 req
.num_msix
= rte_cpu_to_le_16(bp
->max_nq_rings
);
3173 req
.flags
= rte_cpu_to_le_32(bp
->pf
->func_cfg_flags
);
3174 req
.mtu
= rte_cpu_to_le_16(BNXT_MAX_MTU
);
3175 req
.mru
= rte_cpu_to_le_16(BNXT_VNIC_MRU(bp
->eth_dev
->data
->mtu
));
3176 req
.num_rsscos_ctxs
= rte_cpu_to_le_16(bp
->max_rsscos_ctx
);
3177 req
.num_stat_ctxs
= rte_cpu_to_le_16(bp
->max_stat_ctx
);
3178 req
.num_cmpl_rings
= rte_cpu_to_le_16(bp
->max_cp_rings
);
3179 req
.num_tx_rings
= rte_cpu_to_le_16(tx_rings
);
3180 req
.num_rx_rings
= rte_cpu_to_le_16(bp
->max_rx_rings
);
3181 req
.num_l2_ctxs
= rte_cpu_to_le_16(bp
->max_l2_ctx
);
3182 req
.num_vnics
= rte_cpu_to_le_16(bp
->max_vnics
);
3183 req
.fid
= rte_cpu_to_le_16(0xffff);
3184 req
.enables
= rte_cpu_to_le_32(enables
);
3186 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3188 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3190 HWRM_CHECK_RESULT();
3196 static void populate_vf_func_cfg_req(struct bnxt
*bp
,
3197 struct hwrm_func_cfg_input
*req
,
3200 req
->enables
= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU
|
3201 HWRM_FUNC_CFG_INPUT_ENABLES_MRU
|
3202 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS
|
3203 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS
|
3204 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS
|
3205 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS
|
3206 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS
|
3207 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS
|
3208 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS
|
3209 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS
);
3211 req
->mtu
= rte_cpu_to_le_16(bp
->eth_dev
->data
->mtu
+ RTE_ETHER_HDR_LEN
+
3212 RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE
*
3214 req
->mru
= rte_cpu_to_le_16(BNXT_VNIC_MRU(bp
->eth_dev
->data
->mtu
));
3215 req
->num_rsscos_ctxs
= rte_cpu_to_le_16(bp
->max_rsscos_ctx
/
3217 req
->num_stat_ctxs
= rte_cpu_to_le_16(bp
->max_stat_ctx
/ (num_vfs
+ 1));
3218 req
->num_cmpl_rings
= rte_cpu_to_le_16(bp
->max_cp_rings
/
3220 req
->num_tx_rings
= rte_cpu_to_le_16(bp
->max_tx_rings
/ (num_vfs
+ 1));
3221 req
->num_rx_rings
= rte_cpu_to_le_16(bp
->max_rx_rings
/ (num_vfs
+ 1));
3222 req
->num_l2_ctxs
= rte_cpu_to_le_16(bp
->max_l2_ctx
/ (num_vfs
+ 1));
3223 /* TODO: For now, do not support VMDq/RFS on VFs. */
3224 req
->num_vnics
= rte_cpu_to_le_16(1);
3225 req
->num_hw_ring_grps
= rte_cpu_to_le_16(bp
->max_ring_grps
/
3229 static void add_random_mac_if_needed(struct bnxt
*bp
,
3230 struct hwrm_func_cfg_input
*cfg_req
,
3233 struct rte_ether_addr mac
;
3235 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp
, vf
, &mac
))
3238 if (memcmp(mac
.addr_bytes
, "\x00\x00\x00\x00\x00", 6) == 0) {
3240 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR
);
3241 rte_eth_random_addr(cfg_req
->dflt_mac_addr
);
3242 bp
->pf
->vf_info
[vf
].random_mac
= true;
3244 memcpy(cfg_req
->dflt_mac_addr
, mac
.addr_bytes
,
3245 RTE_ETHER_ADDR_LEN
);
3249 static int reserve_resources_from_vf(struct bnxt
*bp
,
3250 struct hwrm_func_cfg_input
*cfg_req
,
3253 struct hwrm_func_qcaps_input req
= {0};
3254 struct hwrm_func_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3257 /* Get the actual allocated values now */
3258 HWRM_PREP(&req
, HWRM_FUNC_QCAPS
, BNXT_USE_CHIMP_MB
);
3259 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3260 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3263 PMD_DRV_LOG(ERR
, "hwrm_func_qcaps failed rc:%d\n", rc
);
3264 copy_func_cfg_to_qcaps(cfg_req
, resp
);
3265 } else if (resp
->error_code
) {
3266 rc
= rte_le_to_cpu_16(resp
->error_code
);
3267 PMD_DRV_LOG(ERR
, "hwrm_func_qcaps error %d\n", rc
);
3268 copy_func_cfg_to_qcaps(cfg_req
, resp
);
3271 bp
->max_rsscos_ctx
-= rte_le_to_cpu_16(resp
->max_rsscos_ctx
);
3272 bp
->max_stat_ctx
-= rte_le_to_cpu_16(resp
->max_stat_ctx
);
3273 bp
->max_cp_rings
-= rte_le_to_cpu_16(resp
->max_cmpl_rings
);
3274 bp
->max_tx_rings
-= rte_le_to_cpu_16(resp
->max_tx_rings
);
3275 bp
->max_rx_rings
-= rte_le_to_cpu_16(resp
->max_rx_rings
);
3276 bp
->max_l2_ctx
-= rte_le_to_cpu_16(resp
->max_l2_ctxs
);
3278 * TODO: While not supporting VMDq with VFs, max_vnics is always
3279 * forced to 1 in this case
3281 //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
3282 bp
->max_ring_grps
-= rte_le_to_cpu_16(resp
->max_hw_ring_grps
);
3289 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt
*bp
, int vf
)
3291 struct hwrm_func_qcfg_input req
= {0};
3292 struct hwrm_func_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3295 /* Check for zero MAC address */
3296 HWRM_PREP(&req
, HWRM_FUNC_QCFG
, BNXT_USE_CHIMP_MB
);
3297 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3298 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3299 HWRM_CHECK_RESULT();
3300 rc
= rte_le_to_cpu_16(resp
->vlan
);
3307 static int update_pf_resource_max(struct bnxt
*bp
)
3309 struct hwrm_func_qcfg_input req
= {0};
3310 struct hwrm_func_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3313 /* And copy the allocated numbers into the pf struct */
3314 HWRM_PREP(&req
, HWRM_FUNC_QCFG
, BNXT_USE_CHIMP_MB
);
3315 req
.fid
= rte_cpu_to_le_16(0xffff);
3316 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3317 HWRM_CHECK_RESULT();
3319 /* Only TX ring value reflects actual allocation? TODO */
3320 bp
->max_tx_rings
= rte_le_to_cpu_16(resp
->alloc_tx_rings
);
3321 bp
->pf
->evb_mode
= resp
->evb_mode
;
3328 int bnxt_hwrm_allocate_pf_only(struct bnxt
*bp
)
3333 PMD_DRV_LOG(ERR
, "Attempt to allcoate VFs on a VF!\n");
3337 rc
= bnxt_hwrm_func_qcaps(bp
);
3341 bp
->pf
->func_cfg_flags
&=
3342 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE
|
3343 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE
);
3344 bp
->pf
->func_cfg_flags
|=
3345 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE
;
3346 rc
= bnxt_hwrm_pf_func_cfg(bp
, bp
->max_tx_rings
);
3347 rc
= __bnxt_hwrm_func_qcaps(bp
);
3351 int bnxt_hwrm_allocate_vfs(struct bnxt
*bp
, int num_vfs
)
3353 struct hwrm_func_cfg_input req
= {0};
3354 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3361 PMD_DRV_LOG(ERR
, "Attempt to allcoate VFs on a VF!\n");
3365 rc
= bnxt_hwrm_func_qcaps(bp
);
3370 bp
->pf
->active_vfs
= num_vfs
;
3373 * First, configure the PF to only use one TX ring. This ensures that
3374 * there are enough rings for all VFs.
3376 * If we don't do this, when we call func_alloc() later, we will lock
3377 * extra rings to the PF that won't be available during func_cfg() of
3380 * This has been fixed with firmware versions above 20.6.54
3382 bp
->pf
->func_cfg_flags
&=
3383 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE
|
3384 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE
);
3385 bp
->pf
->func_cfg_flags
|=
3386 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE
;
3387 rc
= bnxt_hwrm_pf_func_cfg(bp
, 1);
3392 * Now, create and register a buffer to hold forwarded VF requests
3394 req_buf_sz
= num_vfs
* HWRM_MAX_REQ_LEN
;
3395 bp
->pf
->vf_req_buf
= rte_malloc("bnxt_vf_fwd", req_buf_sz
,
3396 page_roundup(num_vfs
* HWRM_MAX_REQ_LEN
));
3397 if (bp
->pf
->vf_req_buf
== NULL
) {
3401 for (sz
= 0; sz
< req_buf_sz
; sz
+= getpagesize())
3402 rte_mem_lock_page(((char *)bp
->pf
->vf_req_buf
) + sz
);
3403 for (i
= 0; i
< num_vfs
; i
++)
3404 bp
->pf
->vf_info
[i
].req_buf
= ((char *)bp
->pf
->vf_req_buf
) +
3405 (i
* HWRM_MAX_REQ_LEN
);
3407 rc
= bnxt_hwrm_func_buf_rgtr(bp
);
3411 populate_vf_func_cfg_req(bp
, &req
, num_vfs
);
3413 bp
->pf
->active_vfs
= 0;
3414 for (i
= 0; i
< num_vfs
; i
++) {
3415 add_random_mac_if_needed(bp
, &req
, i
);
3417 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3418 req
.flags
= rte_cpu_to_le_32(bp
->pf
->vf_info
[i
].func_cfg_flags
);
3419 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[i
].fid
);
3420 rc
= bnxt_hwrm_send_message(bp
,
3425 /* Clear enable flag for next pass */
3426 req
.enables
&= ~rte_cpu_to_le_32(
3427 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR
);
3429 if (rc
|| resp
->error_code
) {
3431 "Failed to initizlie VF %d\n", i
);
3433 "Not all VFs available. (%d, %d)\n",
3434 rc
, resp
->error_code
);
3441 reserve_resources_from_vf(bp
, &req
, i
);
3442 bp
->pf
->active_vfs
++;
3443 bnxt_hwrm_func_clr_stats(bp
, bp
->pf
->vf_info
[i
].fid
);
3447 * Now configure the PF to use "the rest" of the resources
3448 * We're using STD_TX_RING_MODE here though which will limit the TX
3449 * rings. This will allow QoS to function properly. Not setting this
3450 * will cause PF rings to break bandwidth settings.
3452 rc
= bnxt_hwrm_pf_func_cfg(bp
, bp
->max_tx_rings
);
3456 rc
= update_pf_resource_max(bp
);
3463 bnxt_hwrm_func_buf_unrgtr(bp
);
3467 int bnxt_hwrm_pf_evb_mode(struct bnxt
*bp
)
3469 struct hwrm_func_cfg_input req
= {0};
3470 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3473 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3475 req
.fid
= rte_cpu_to_le_16(0xffff);
3476 req
.enables
= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE
);
3477 req
.evb_mode
= bp
->pf
->evb_mode
;
3479 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3480 HWRM_CHECK_RESULT();
3486 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt
*bp
, uint16_t port
,
3487 uint8_t tunnel_type
)
3489 struct hwrm_tunnel_dst_port_alloc_input req
= {0};
3490 struct hwrm_tunnel_dst_port_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3493 HWRM_PREP(&req
, HWRM_TUNNEL_DST_PORT_ALLOC
, BNXT_USE_CHIMP_MB
);
3494 req
.tunnel_type
= tunnel_type
;
3495 req
.tunnel_dst_port_val
= port
;
3496 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3497 HWRM_CHECK_RESULT();
3499 switch (tunnel_type
) {
3500 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
:
3501 bp
->vxlan_fw_dst_port_id
= resp
->tunnel_dst_port_id
;
3502 bp
->vxlan_port
= port
;
3504 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE
:
3505 bp
->geneve_fw_dst_port_id
= resp
->tunnel_dst_port_id
;
3506 bp
->geneve_port
= port
;
3517 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt
*bp
, uint16_t port
,
3518 uint8_t tunnel_type
)
3520 struct hwrm_tunnel_dst_port_free_input req
= {0};
3521 struct hwrm_tunnel_dst_port_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3524 HWRM_PREP(&req
, HWRM_TUNNEL_DST_PORT_FREE
, BNXT_USE_CHIMP_MB
);
3526 req
.tunnel_type
= tunnel_type
;
3527 req
.tunnel_dst_port_id
= rte_cpu_to_be_16(port
);
3528 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3530 HWRM_CHECK_RESULT();
3536 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt
*bp
, uint16_t vf
,
3539 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3540 struct hwrm_func_cfg_input req
= {0};
3543 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3545 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3546 req
.flags
= rte_cpu_to_le_32(flags
);
3547 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3549 HWRM_CHECK_RESULT();
3555 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info
*vnic
, void *flagp
)
3557 uint32_t *flag
= flagp
;
3559 vnic
->flags
= *flag
;
/* Program the L2 RX mask for a VNIC without any VLAN restriction. */
int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
3567 int bnxt_hwrm_func_buf_rgtr(struct bnxt
*bp
)
3570 struct hwrm_func_buf_rgtr_input req
= {.req_type
= 0 };
3571 struct hwrm_func_buf_rgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3573 HWRM_PREP(&req
, HWRM_FUNC_BUF_RGTR
, BNXT_USE_CHIMP_MB
);
3575 req
.req_buf_num_pages
= rte_cpu_to_le_16(1);
3576 req
.req_buf_page_size
= rte_cpu_to_le_16(
3577 page_getenum(bp
->pf
->active_vfs
* HWRM_MAX_REQ_LEN
));
3578 req
.req_buf_len
= rte_cpu_to_le_16(HWRM_MAX_REQ_LEN
);
3579 req
.req_buf_page_addr0
=
3580 rte_cpu_to_le_64(rte_malloc_virt2iova(bp
->pf
->vf_req_buf
));
3581 if (req
.req_buf_page_addr0
== RTE_BAD_IOVA
) {
3583 "unable to map buffer address to physical memory\n");
3587 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3589 HWRM_CHECK_RESULT();
3595 int bnxt_hwrm_func_buf_unrgtr(struct bnxt
*bp
)
3598 struct hwrm_func_buf_unrgtr_input req
= {.req_type
= 0 };
3599 struct hwrm_func_buf_unrgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3601 if (!(BNXT_PF(bp
) && bp
->pdev
->max_vfs
))
3604 HWRM_PREP(&req
, HWRM_FUNC_BUF_UNRGTR
, BNXT_USE_CHIMP_MB
);
3606 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3608 HWRM_CHECK_RESULT();
3614 int bnxt_hwrm_func_cfg_def_cp(struct bnxt
*bp
)
3616 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3617 struct hwrm_func_cfg_input req
= {0};
3620 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3622 req
.fid
= rte_cpu_to_le_16(0xffff);
3623 req
.flags
= rte_cpu_to_le_32(bp
->pf
->func_cfg_flags
);
3624 req
.enables
= rte_cpu_to_le_32(
3625 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR
);
3626 req
.async_event_cr
= rte_cpu_to_le_16(
3627 bp
->async_cp_ring
->cp_ring_struct
->fw_ring_id
);
3628 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3630 HWRM_CHECK_RESULT();
3636 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt
*bp
)
3638 struct hwrm_func_vf_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3639 struct hwrm_func_vf_cfg_input req
= {0};
3642 HWRM_PREP(&req
, HWRM_FUNC_VF_CFG
, BNXT_USE_CHIMP_MB
);
3644 req
.enables
= rte_cpu_to_le_32(
3645 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR
);
3646 req
.async_event_cr
= rte_cpu_to_le_16(
3647 bp
->async_cp_ring
->cp_ring_struct
->fw_ring_id
);
3648 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3650 HWRM_CHECK_RESULT();
3656 int bnxt_hwrm_set_default_vlan(struct bnxt
*bp
, int vf
, uint8_t is_vf
)
3658 struct hwrm_func_cfg_input req
= {0};
3659 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3660 uint16_t dflt_vlan
, fid
;
3661 uint32_t func_cfg_flags
;
3664 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3667 dflt_vlan
= bp
->pf
->vf_info
[vf
].dflt_vlan
;
3668 fid
= bp
->pf
->vf_info
[vf
].fid
;
3669 func_cfg_flags
= bp
->pf
->vf_info
[vf
].func_cfg_flags
;
3671 fid
= rte_cpu_to_le_16(0xffff);
3672 func_cfg_flags
= bp
->pf
->func_cfg_flags
;
3673 dflt_vlan
= bp
->vlan
;
3676 req
.flags
= rte_cpu_to_le_32(func_cfg_flags
);
3677 req
.fid
= rte_cpu_to_le_16(fid
);
3678 req
.enables
|= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN
);
3679 req
.dflt_vlan
= rte_cpu_to_le_16(dflt_vlan
);
3681 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3683 HWRM_CHECK_RESULT();
3689 int bnxt_hwrm_func_bw_cfg(struct bnxt
*bp
, uint16_t vf
,
3690 uint16_t max_bw
, uint16_t enables
)
3692 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3693 struct hwrm_func_cfg_input req
= {0};
3696 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3698 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3699 req
.enables
|= rte_cpu_to_le_32(enables
);
3700 req
.flags
= rte_cpu_to_le_32(bp
->pf
->vf_info
[vf
].func_cfg_flags
);
3701 req
.max_bw
= rte_cpu_to_le_32(max_bw
);
3702 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3704 HWRM_CHECK_RESULT();
3710 int bnxt_hwrm_set_vf_vlan(struct bnxt
*bp
, int vf
)
3712 struct hwrm_func_cfg_input req
= {0};
3713 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3716 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
3718 req
.flags
= rte_cpu_to_le_32(bp
->pf
->vf_info
[vf
].func_cfg_flags
);
3719 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3720 req
.enables
|= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN
);
3721 req
.dflt_vlan
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].dflt_vlan
);
3723 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3725 HWRM_CHECK_RESULT();
/* Dispatch the async event ring configuration to the PF or VF variant. */
int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
{
	int rc;

	if (BNXT_PF(bp))
		rc = bnxt_hwrm_func_cfg_def_cp(bp);
	else
		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);

	return rc;
}
3743 int bnxt_hwrm_reject_fwd_resp(struct bnxt
*bp
, uint16_t target_id
,
3744 void *encaped
, size_t ec_size
)
3747 struct hwrm_reject_fwd_resp_input req
= {.req_type
= 0};
3748 struct hwrm_reject_fwd_resp_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3750 if (ec_size
> sizeof(req
.encap_request
))
3753 HWRM_PREP(&req
, HWRM_REJECT_FWD_RESP
, BNXT_USE_CHIMP_MB
);
3755 req
.encap_resp_target_id
= rte_cpu_to_le_16(target_id
);
3756 memcpy(req
.encap_request
, encaped
, ec_size
);
3758 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3760 HWRM_CHECK_RESULT();
3766 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt
*bp
, uint16_t vf
,
3767 struct rte_ether_addr
*mac
)
3769 struct hwrm_func_qcfg_input req
= {0};
3770 struct hwrm_func_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3773 HWRM_PREP(&req
, HWRM_FUNC_QCFG
, BNXT_USE_CHIMP_MB
);
3775 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
3776 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3778 HWRM_CHECK_RESULT();
3780 memcpy(mac
->addr_bytes
, resp
->mac_address
, RTE_ETHER_ADDR_LEN
);
3787 int bnxt_hwrm_exec_fwd_resp(struct bnxt
*bp
, uint16_t target_id
,
3788 void *encaped
, size_t ec_size
)
3791 struct hwrm_exec_fwd_resp_input req
= {.req_type
= 0};
3792 struct hwrm_exec_fwd_resp_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3794 if (ec_size
> sizeof(req
.encap_request
))
3797 HWRM_PREP(&req
, HWRM_EXEC_FWD_RESP
, BNXT_USE_CHIMP_MB
);
3799 req
.encap_resp_target_id
= rte_cpu_to_le_16(target_id
);
3800 memcpy(req
.encap_request
, encaped
, ec_size
);
3802 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3804 HWRM_CHECK_RESULT();
3810 int bnxt_hwrm_ctx_qstats(struct bnxt
*bp
, uint32_t cid
, int idx
,
3811 struct rte_eth_stats
*stats
, uint8_t rx
)
3814 struct hwrm_stat_ctx_query_input req
= {.req_type
= 0};
3815 struct hwrm_stat_ctx_query_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3817 HWRM_PREP(&req
, HWRM_STAT_CTX_QUERY
, BNXT_USE_CHIMP_MB
);
3819 req
.stat_ctx_id
= rte_cpu_to_le_32(cid
);
3821 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3823 HWRM_CHECK_RESULT();
3826 stats
->q_ipackets
[idx
] = rte_le_to_cpu_64(resp
->rx_ucast_pkts
);
3827 stats
->q_ipackets
[idx
] += rte_le_to_cpu_64(resp
->rx_mcast_pkts
);
3828 stats
->q_ipackets
[idx
] += rte_le_to_cpu_64(resp
->rx_bcast_pkts
);
3829 stats
->q_ibytes
[idx
] = rte_le_to_cpu_64(resp
->rx_ucast_bytes
);
3830 stats
->q_ibytes
[idx
] += rte_le_to_cpu_64(resp
->rx_mcast_bytes
);
3831 stats
->q_ibytes
[idx
] += rte_le_to_cpu_64(resp
->rx_bcast_bytes
);
3832 stats
->q_errors
[idx
] = rte_le_to_cpu_64(resp
->rx_err_pkts
);
3833 stats
->q_errors
[idx
] += rte_le_to_cpu_64(resp
->rx_drop_pkts
);
3835 stats
->q_opackets
[idx
] = rte_le_to_cpu_64(resp
->tx_ucast_pkts
);
3836 stats
->q_opackets
[idx
] += rte_le_to_cpu_64(resp
->tx_mcast_pkts
);
3837 stats
->q_opackets
[idx
] += rte_le_to_cpu_64(resp
->tx_bcast_pkts
);
3838 stats
->q_obytes
[idx
] = rte_le_to_cpu_64(resp
->tx_ucast_bytes
);
3839 stats
->q_obytes
[idx
] += rte_le_to_cpu_64(resp
->tx_mcast_bytes
);
3840 stats
->q_obytes
[idx
] += rte_le_to_cpu_64(resp
->tx_bcast_bytes
);
3848 int bnxt_hwrm_port_qstats(struct bnxt
*bp
)
3850 struct hwrm_port_qstats_input req
= {0};
3851 struct hwrm_port_qstats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3852 struct bnxt_pf_info
*pf
= bp
->pf
;
3855 HWRM_PREP(&req
, HWRM_PORT_QSTATS
, BNXT_USE_CHIMP_MB
);
3857 req
.port_id
= rte_cpu_to_le_16(pf
->port_id
);
3858 req
.tx_stat_host_addr
= rte_cpu_to_le_64(bp
->hw_tx_port_stats_map
);
3859 req
.rx_stat_host_addr
= rte_cpu_to_le_64(bp
->hw_rx_port_stats_map
);
3860 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3862 HWRM_CHECK_RESULT();
3868 int bnxt_hwrm_port_clr_stats(struct bnxt
*bp
)
3870 struct hwrm_port_clr_stats_input req
= {0};
3871 struct hwrm_port_clr_stats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3872 struct bnxt_pf_info
*pf
= bp
->pf
;
3875 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3876 if (!(bp
->flags
& BNXT_FLAG_PORT_STATS
) || BNXT_VF(bp
) ||
3877 BNXT_NPAR(bp
) || BNXT_MH(bp
) || BNXT_TOTAL_VFS(bp
))
3880 HWRM_PREP(&req
, HWRM_PORT_CLR_STATS
, BNXT_USE_CHIMP_MB
);
3882 req
.port_id
= rte_cpu_to_le_16(pf
->port_id
);
3883 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3885 HWRM_CHECK_RESULT();
3891 int bnxt_hwrm_port_led_qcaps(struct bnxt
*bp
)
3893 struct hwrm_port_led_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3894 struct hwrm_port_led_qcaps_input req
= {0};
3900 HWRM_PREP(&req
, HWRM_PORT_LED_QCAPS
, BNXT_USE_CHIMP_MB
);
3901 req
.port_id
= bp
->pf
->port_id
;
3902 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3904 HWRM_CHECK_RESULT();
3906 if (resp
->num_leds
> 0 && resp
->num_leds
< BNXT_MAX_LED
) {
3909 bp
->leds
->num_leds
= resp
->num_leds
;
3910 memcpy(bp
->leds
, &resp
->led0_id
,
3911 sizeof(bp
->leds
[0]) * bp
->leds
->num_leds
);
3912 for (i
= 0; i
< bp
->leds
->num_leds
; i
++) {
3913 struct bnxt_led_info
*led
= &bp
->leds
[i
];
3915 uint16_t caps
= led
->led_state_caps
;
3917 if (!led
->led_group_id
||
3918 !BNXT_LED_ALT_BLINK_CAP(caps
)) {
3919 bp
->leds
->num_leds
= 0;
3930 int bnxt_hwrm_port_led_cfg(struct bnxt
*bp
, bool led_on
)
3932 struct hwrm_port_led_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3933 struct hwrm_port_led_cfg_input req
= {0};
3934 struct bnxt_led_cfg
*led_cfg
;
3935 uint8_t led_state
= HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT
;
3936 uint16_t duration
= 0;
3939 if (!bp
->leds
->num_leds
|| BNXT_VF(bp
))
3942 HWRM_PREP(&req
, HWRM_PORT_LED_CFG
, BNXT_USE_CHIMP_MB
);
3945 led_state
= HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT
;
3946 duration
= rte_cpu_to_le_16(500);
3948 req
.port_id
= bp
->pf
->port_id
;
3949 req
.num_leds
= bp
->leds
->num_leds
;
3950 led_cfg
= (struct bnxt_led_cfg
*)&req
.led0_id
;
3951 for (i
= 0; i
< bp
->leds
->num_leds
; i
++, led_cfg
++) {
3952 req
.enables
|= BNXT_LED_DFLT_ENABLES(i
);
3953 led_cfg
->led_id
= bp
->leds
[i
].led_id
;
3954 led_cfg
->led_state
= led_state
;
3955 led_cfg
->led_blink_on
= duration
;
3956 led_cfg
->led_blink_off
= duration
;
3957 led_cfg
->led_group_id
= bp
->leds
[i
].led_group_id
;
3960 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3962 HWRM_CHECK_RESULT();
3968 int bnxt_hwrm_nvm_get_dir_info(struct bnxt
*bp
, uint32_t *entries
,
3972 struct hwrm_nvm_get_dir_info_input req
= {0};
3973 struct hwrm_nvm_get_dir_info_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3975 HWRM_PREP(&req
, HWRM_NVM_GET_DIR_INFO
, BNXT_USE_CHIMP_MB
);
3977 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
3979 HWRM_CHECK_RESULT();
3981 *entries
= rte_le_to_cpu_32(resp
->entries
);
3982 *length
= rte_le_to_cpu_32(resp
->entry_length
);
3988 int bnxt_get_nvram_directory(struct bnxt
*bp
, uint32_t len
, uint8_t *data
)
3991 uint32_t dir_entries
;
3992 uint32_t entry_length
;
3995 rte_iova_t dma_handle
;
3996 struct hwrm_nvm_get_dir_entries_input req
= {0};
3997 struct hwrm_nvm_get_dir_entries_output
*resp
= bp
->hwrm_cmd_resp_addr
;
3999 rc
= bnxt_hwrm_nvm_get_dir_info(bp
, &dir_entries
, &entry_length
);
4003 *data
++ = dir_entries
;
4004 *data
++ = entry_length
;
4006 memset(data
, 0xff, len
);
4008 buflen
= dir_entries
* entry_length
;
4009 buf
= rte_malloc("nvm_dir", buflen
, 0);
4012 dma_handle
= rte_malloc_virt2iova(buf
);
4013 if (dma_handle
== RTE_BAD_IOVA
) {
4015 "unable to map response address to physical memory\n");
4018 HWRM_PREP(&req
, HWRM_NVM_GET_DIR_ENTRIES
, BNXT_USE_CHIMP_MB
);
4019 req
.host_dest_addr
= rte_cpu_to_le_64(dma_handle
);
4020 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4023 memcpy(data
, buf
, len
> buflen
? buflen
: len
);
4026 HWRM_CHECK_RESULT();
4032 int bnxt_hwrm_get_nvram_item(struct bnxt
*bp
, uint32_t index
,
4033 uint32_t offset
, uint32_t length
,
4038 rte_iova_t dma_handle
;
4039 struct hwrm_nvm_read_input req
= {0};
4040 struct hwrm_nvm_read_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4042 buf
= rte_malloc("nvm_item", length
, 0);
4046 dma_handle
= rte_malloc_virt2iova(buf
);
4047 if (dma_handle
== RTE_BAD_IOVA
) {
4049 "unable to map response address to physical memory\n");
4052 HWRM_PREP(&req
, HWRM_NVM_READ
, BNXT_USE_CHIMP_MB
);
4053 req
.host_dest_addr
= rte_cpu_to_le_64(dma_handle
);
4054 req
.dir_idx
= rte_cpu_to_le_16(index
);
4055 req
.offset
= rte_cpu_to_le_32(offset
);
4056 req
.len
= rte_cpu_to_le_32(length
);
4057 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4059 memcpy(data
, buf
, length
);
4062 HWRM_CHECK_RESULT();
4068 int bnxt_hwrm_erase_nvram_directory(struct bnxt
*bp
, uint8_t index
)
4071 struct hwrm_nvm_erase_dir_entry_input req
= {0};
4072 struct hwrm_nvm_erase_dir_entry_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4074 HWRM_PREP(&req
, HWRM_NVM_ERASE_DIR_ENTRY
, BNXT_USE_CHIMP_MB
);
4075 req
.dir_idx
= rte_cpu_to_le_16(index
);
4076 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4077 HWRM_CHECK_RESULT();
4084 int bnxt_hwrm_flash_nvram(struct bnxt
*bp
, uint16_t dir_type
,
4085 uint16_t dir_ordinal
, uint16_t dir_ext
,
4086 uint16_t dir_attr
, const uint8_t *data
,
4090 struct hwrm_nvm_write_input req
= {0};
4091 struct hwrm_nvm_write_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4092 rte_iova_t dma_handle
;
4095 buf
= rte_malloc("nvm_write", data_len
, 0);
4099 dma_handle
= rte_malloc_virt2iova(buf
);
4100 if (dma_handle
== RTE_BAD_IOVA
) {
4102 "unable to map response address to physical memory\n");
4105 memcpy(buf
, data
, data_len
);
4107 HWRM_PREP(&req
, HWRM_NVM_WRITE
, BNXT_USE_CHIMP_MB
);
4109 req
.dir_type
= rte_cpu_to_le_16(dir_type
);
4110 req
.dir_ordinal
= rte_cpu_to_le_16(dir_ordinal
);
4111 req
.dir_ext
= rte_cpu_to_le_16(dir_ext
);
4112 req
.dir_attr
= rte_cpu_to_le_16(dir_attr
);
4113 req
.dir_data_length
= rte_cpu_to_le_32(data_len
);
4114 req
.host_src_addr
= rte_cpu_to_le_64(dma_handle
);
4116 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4119 HWRM_CHECK_RESULT();
4126 bnxt_vnic_count(struct bnxt_vnic_info
*vnic __rte_unused
, void *cbdata
)
4128 uint32_t *count
= cbdata
;
4130 *count
= *count
+ 1;
4133 static int bnxt_vnic_count_hwrm_stub(struct bnxt
*bp __rte_unused
,
4134 struct bnxt_vnic_info
*vnic __rte_unused
)
4139 int bnxt_vf_vnic_count(struct bnxt
*bp
, uint16_t vf
)
4143 bnxt_hwrm_func_vf_vnic_query_and_config(bp
, vf
, bnxt_vnic_count
,
4144 &count
, bnxt_vnic_count_hwrm_stub
);
4149 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt
*bp
, uint16_t vf
,
4152 struct hwrm_func_vf_vnic_ids_query_input req
= {0};
4153 struct hwrm_func_vf_vnic_ids_query_output
*resp
=
4154 bp
->hwrm_cmd_resp_addr
;
4157 /* First query all VNIC ids */
4158 HWRM_PREP(&req
, HWRM_FUNC_VF_VNIC_IDS_QUERY
, BNXT_USE_CHIMP_MB
);
4160 req
.vf_id
= rte_cpu_to_le_16(bp
->pf
->first_vf_id
+ vf
);
4161 req
.max_vnic_id_cnt
= rte_cpu_to_le_32(bp
->pf
->total_vnics
);
4162 req
.vnic_id_tbl_addr
= rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids
));
4164 if (req
.vnic_id_tbl_addr
== RTE_BAD_IOVA
) {
4167 "unable to map VNIC ID table address to physical memory\n");
4170 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4171 HWRM_CHECK_RESULT();
4172 rc
= rte_le_to_cpu_32(resp
->vnic_id_cnt
);
4180 * This function queries the VNIC IDs for a specified VF. It then calls
4181 * the vnic_cb to update the necessary field in vnic_info with cbdata.
4182 * Then it calls the hwrm_cb function to program this new vnic configuration.
4184 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt
*bp
, uint16_t vf
,
4185 void (*vnic_cb
)(struct bnxt_vnic_info
*, void *), void *cbdata
,
4186 int (*hwrm_cb
)(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
))
4188 struct bnxt_vnic_info vnic
;
4190 int i
, num_vnic_ids
;
4195 /* First query all VNIC ids */
4196 vnic_id_sz
= bp
->pf
->total_vnics
* sizeof(*vnic_ids
);
4197 vnic_ids
= rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz
,
4198 RTE_CACHE_LINE_SIZE
);
4199 if (vnic_ids
== NULL
)
4202 for (sz
= 0; sz
< vnic_id_sz
; sz
+= getpagesize())
4203 rte_mem_lock_page(((char *)vnic_ids
) + sz
);
4205 num_vnic_ids
= bnxt_hwrm_func_vf_vnic_query(bp
, vf
, vnic_ids
);
4207 if (num_vnic_ids
< 0)
4208 return num_vnic_ids
;
4210 /* Retrieve VNIC, update bd_stall then update */
4212 for (i
= 0; i
< num_vnic_ids
; i
++) {
4213 memset(&vnic
, 0, sizeof(struct bnxt_vnic_info
));
4214 vnic
.fw_vnic_id
= rte_le_to_cpu_16(vnic_ids
[i
]);
4215 rc
= bnxt_hwrm_vnic_qcfg(bp
, &vnic
, bp
->pf
->first_vf_id
+ vf
);
4218 if (vnic
.mru
<= 4) /* Indicates unallocated */
4221 vnic_cb(&vnic
, cbdata
);
4223 rc
= hwrm_cb(bp
, &vnic
);
4233 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt
*bp
, uint16_t vf
,
4236 struct hwrm_func_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4237 struct hwrm_func_cfg_input req
= {0};
4240 HWRM_PREP(&req
, HWRM_FUNC_CFG
, BNXT_USE_CHIMP_MB
);
4242 req
.fid
= rte_cpu_to_le_16(bp
->pf
->vf_info
[vf
].fid
);
4243 req
.enables
|= rte_cpu_to_le_32(
4244 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE
);
4245 req
.vlan_antispoof_mode
= on
?
4246 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN
:
4247 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK
;
4248 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4250 HWRM_CHECK_RESULT();
4256 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt
*bp
, int vf
)
4258 struct bnxt_vnic_info vnic
;
4261 int num_vnic_ids
, i
;
4265 vnic_id_sz
= bp
->pf
->total_vnics
* sizeof(*vnic_ids
);
4266 vnic_ids
= rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz
,
4267 RTE_CACHE_LINE_SIZE
);
4268 if (vnic_ids
== NULL
)
4271 for (sz
= 0; sz
< vnic_id_sz
; sz
+= getpagesize())
4272 rte_mem_lock_page(((char *)vnic_ids
) + sz
);
4274 rc
= bnxt_hwrm_func_vf_vnic_query(bp
, vf
, vnic_ids
);
4280 * Loop through to find the default VNIC ID.
4281 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4282 * by sending the hwrm_func_qcfg command to the firmware.
4284 for (i
= 0; i
< num_vnic_ids
; i
++) {
4285 memset(&vnic
, 0, sizeof(struct bnxt_vnic_info
));
4286 vnic
.fw_vnic_id
= rte_le_to_cpu_16(vnic_ids
[i
]);
4287 rc
= bnxt_hwrm_vnic_qcfg(bp
, &vnic
,
4288 bp
->pf
->first_vf_id
+ vf
);
4291 if (vnic
.func_default
) {
4293 return vnic
.fw_vnic_id
;
4296 /* Could not find a default VNIC. */
4297 PMD_DRV_LOG(ERR
, "No default VNIC\n");
4303 int bnxt_hwrm_set_em_filter(struct bnxt
*bp
,
4305 struct bnxt_filter_info
*filter
)
4308 struct hwrm_cfa_em_flow_alloc_input req
= {.req_type
= 0 };
4309 struct hwrm_cfa_em_flow_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4310 uint32_t enables
= 0;
4312 if (filter
->fw_em_filter_id
!= UINT64_MAX
)
4313 bnxt_hwrm_clear_em_filter(bp
, filter
);
4315 HWRM_PREP(&req
, HWRM_CFA_EM_FLOW_ALLOC
, BNXT_USE_KONG(bp
));
4317 req
.flags
= rte_cpu_to_le_32(filter
->flags
);
4319 enables
= filter
->enables
|
4320 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID
;
4321 req
.dst_id
= rte_cpu_to_le_16(dst_id
);
4323 if (filter
->ip_addr_type
) {
4324 req
.ip_addr_type
= filter
->ip_addr_type
;
4325 enables
|= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE
;
4328 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID
)
4329 req
.l2_filter_id
= rte_cpu_to_le_64(filter
->fw_l2_filter_id
);
4331 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR
)
4332 memcpy(req
.src_macaddr
, filter
->src_macaddr
,
4333 RTE_ETHER_ADDR_LEN
);
4335 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR
)
4336 memcpy(req
.dst_macaddr
, filter
->dst_macaddr
,
4337 RTE_ETHER_ADDR_LEN
);
4339 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID
)
4340 req
.ovlan_vid
= filter
->l2_ovlan
;
4342 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID
)
4343 req
.ivlan_vid
= filter
->l2_ivlan
;
4345 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE
)
4346 req
.ethertype
= rte_cpu_to_be_16(filter
->ethertype
);
4348 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL
)
4349 req
.ip_protocol
= filter
->ip_protocol
;
4351 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR
)
4352 req
.src_ipaddr
[0] = rte_cpu_to_be_32(filter
->src_ipaddr
[0]);
4354 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR
)
4355 req
.dst_ipaddr
[0] = rte_cpu_to_be_32(filter
->dst_ipaddr
[0]);
4357 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT
)
4358 req
.src_port
= rte_cpu_to_be_16(filter
->src_port
);
4360 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT
)
4361 req
.dst_port
= rte_cpu_to_be_16(filter
->dst_port
);
4363 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID
)
4364 req
.mirror_vnic_id
= filter
->mirror_vnic_id
;
4366 req
.enables
= rte_cpu_to_le_32(enables
);
4368 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
4370 HWRM_CHECK_RESULT();
4372 filter
->fw_em_filter_id
= rte_le_to_cpu_64(resp
->em_filter_id
);
4378 int bnxt_hwrm_clear_em_filter(struct bnxt
*bp
, struct bnxt_filter_info
*filter
)
4381 struct hwrm_cfa_em_flow_free_input req
= {.req_type
= 0 };
4382 struct hwrm_cfa_em_flow_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4384 if (filter
->fw_em_filter_id
== UINT64_MAX
)
4387 HWRM_PREP(&req
, HWRM_CFA_EM_FLOW_FREE
, BNXT_USE_KONG(bp
));
4389 req
.em_filter_id
= rte_cpu_to_le_64(filter
->fw_em_filter_id
);
4391 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
4393 HWRM_CHECK_RESULT();
4396 filter
->fw_em_filter_id
= UINT64_MAX
;
4397 filter
->fw_l2_filter_id
= UINT64_MAX
;
4402 int bnxt_hwrm_set_ntuple_filter(struct bnxt
*bp
,
4404 struct bnxt_filter_info
*filter
)
4407 struct hwrm_cfa_ntuple_filter_alloc_input req
= {.req_type
= 0 };
4408 struct hwrm_cfa_ntuple_filter_alloc_output
*resp
=
4409 bp
->hwrm_cmd_resp_addr
;
4410 uint32_t enables
= 0;
4412 if (filter
->fw_ntuple_filter_id
!= UINT64_MAX
)
4413 bnxt_hwrm_clear_ntuple_filter(bp
, filter
);
4415 HWRM_PREP(&req
, HWRM_CFA_NTUPLE_FILTER_ALLOC
, BNXT_USE_CHIMP_MB
);
4417 req
.flags
= rte_cpu_to_le_32(filter
->flags
);
4419 enables
= filter
->enables
|
4420 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID
;
4421 req
.dst_id
= rte_cpu_to_le_16(dst_id
);
4423 if (filter
->ip_addr_type
) {
4424 req
.ip_addr_type
= filter
->ip_addr_type
;
4426 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE
;
4429 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID
)
4430 req
.l2_filter_id
= rte_cpu_to_le_64(filter
->fw_l2_filter_id
);
4432 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
)
4433 memcpy(req
.src_macaddr
, filter
->src_macaddr
,
4434 RTE_ETHER_ADDR_LEN
);
4436 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE
)
4437 req
.ethertype
= rte_cpu_to_be_16(filter
->ethertype
);
4439 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL
)
4440 req
.ip_protocol
= filter
->ip_protocol
;
4442 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR
)
4443 req
.src_ipaddr
[0] = rte_cpu_to_le_32(filter
->src_ipaddr
[0]);
4445 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK
)
4446 req
.src_ipaddr_mask
[0] =
4447 rte_cpu_to_le_32(filter
->src_ipaddr_mask
[0]);
4449 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR
)
4450 req
.dst_ipaddr
[0] = rte_cpu_to_le_32(filter
->dst_ipaddr
[0]);
4452 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK
)
4453 req
.dst_ipaddr_mask
[0] =
4454 rte_cpu_to_be_32(filter
->dst_ipaddr_mask
[0]);
4456 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT
)
4457 req
.src_port
= rte_cpu_to_le_16(filter
->src_port
);
4459 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK
)
4460 req
.src_port_mask
= rte_cpu_to_le_16(filter
->src_port_mask
);
4462 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT
)
4463 req
.dst_port
= rte_cpu_to_le_16(filter
->dst_port
);
4465 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK
)
4466 req
.dst_port_mask
= rte_cpu_to_le_16(filter
->dst_port_mask
);
4468 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID
)
4469 req
.mirror_vnic_id
= filter
->mirror_vnic_id
;
4471 req
.enables
= rte_cpu_to_le_32(enables
);
4473 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4475 HWRM_CHECK_RESULT();
4477 filter
->fw_ntuple_filter_id
= rte_le_to_cpu_64(resp
->ntuple_filter_id
);
4478 filter
->flow_id
= rte_le_to_cpu_32(resp
->flow_id
);
4484 int bnxt_hwrm_clear_ntuple_filter(struct bnxt
*bp
,
4485 struct bnxt_filter_info
*filter
)
4488 struct hwrm_cfa_ntuple_filter_free_input req
= {.req_type
= 0 };
4489 struct hwrm_cfa_ntuple_filter_free_output
*resp
=
4490 bp
->hwrm_cmd_resp_addr
;
4492 if (filter
->fw_ntuple_filter_id
== UINT64_MAX
)
4495 HWRM_PREP(&req
, HWRM_CFA_NTUPLE_FILTER_FREE
, BNXT_USE_CHIMP_MB
);
4497 req
.ntuple_filter_id
= rte_cpu_to_le_64(filter
->fw_ntuple_filter_id
);
4499 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4501 HWRM_CHECK_RESULT();
4504 filter
->fw_ntuple_filter_id
= UINT64_MAX
;
4510 bnxt_vnic_rss_configure_thor(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
4512 struct hwrm_vnic_rss_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4513 uint8_t *rx_queue_state
= bp
->eth_dev
->data
->rx_queue_state
;
4514 struct hwrm_vnic_rss_cfg_input req
= {.req_type
= 0 };
4515 struct bnxt_rx_queue
**rxqs
= bp
->rx_queues
;
4516 uint16_t *ring_tbl
= vnic
->rss_table
;
4517 int nr_ctxs
= vnic
->num_lb_ctxts
;
4518 int max_rings
= bp
->rx_nr_rings
;
4522 for (i
= 0, k
= 0; i
< nr_ctxs
; i
++) {
4523 struct bnxt_rx_ring_info
*rxr
;
4524 struct bnxt_cp_ring_info
*cpr
;
4526 HWRM_PREP(&req
, HWRM_VNIC_RSS_CFG
, BNXT_USE_CHIMP_MB
);
4528 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
4529 req
.hash_type
= rte_cpu_to_le_32(vnic
->hash_type
);
4530 req
.hash_mode_flags
= vnic
->hash_mode
;
4532 req
.ring_grp_tbl_addr
=
4533 rte_cpu_to_le_64(vnic
->rss_table_dma_addr
+
4534 i
* BNXT_RSS_ENTRIES_PER_CTX_THOR
*
4535 2 * sizeof(*ring_tbl
));
4536 req
.hash_key_tbl_addr
=
4537 rte_cpu_to_le_64(vnic
->rss_hash_key_dma_addr
);
4539 req
.ring_table_pair_index
= i
;
4540 req
.rss_ctx_idx
= rte_cpu_to_le_16(vnic
->fw_grp_ids
[i
]);
4542 for (j
= 0; j
< 64; j
++) {
4545 /* Find next active ring. */
4546 for (cnt
= 0; cnt
< max_rings
; cnt
++) {
4547 if (rx_queue_state
[k
] !=
4548 RTE_ETH_QUEUE_STATE_STOPPED
)
4550 if (++k
== max_rings
)
4554 /* Return if no rings are active. */
4555 if (cnt
== max_rings
) {
4560 /* Add rx/cp ring pair to RSS table. */
4561 rxr
= rxqs
[k
]->rx_ring
;
4562 cpr
= rxqs
[k
]->cp_ring
;
4564 ring_id
= rxr
->rx_ring_struct
->fw_ring_id
;
4565 *ring_tbl
++ = rte_cpu_to_le_16(ring_id
);
4566 ring_id
= cpr
->cp_ring_struct
->fw_ring_id
;
4567 *ring_tbl
++ = rte_cpu_to_le_16(ring_id
);
4569 if (++k
== max_rings
)
4572 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
),
4575 HWRM_CHECK_RESULT();
4582 int bnxt_vnic_rss_configure(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
4584 unsigned int rss_idx
, fw_idx
, i
;
4586 if (!(vnic
->rss_table
&& vnic
->hash_type
))
4589 if (BNXT_CHIP_THOR(bp
))
4590 return bnxt_vnic_rss_configure_thor(bp
, vnic
);
4592 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
)
4595 if (vnic
->rss_table
&& vnic
->hash_type
) {
4597 * Fill the RSS hash & redirection table with
4598 * ring group ids for all VNICs
4600 for (rss_idx
= 0, fw_idx
= 0; rss_idx
< HW_HASH_INDEX_SIZE
;
4601 rss_idx
++, fw_idx
++) {
4602 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++) {
4603 fw_idx
%= bp
->rx_cp_nr_rings
;
4604 if (vnic
->fw_grp_ids
[fw_idx
] !=
4609 if (i
== bp
->rx_cp_nr_rings
)
4611 vnic
->rss_table
[rss_idx
] = vnic
->fw_grp_ids
[fw_idx
];
4613 return bnxt_hwrm_vnic_rss_cfg(bp
, vnic
);
4619 static void bnxt_hwrm_set_coal_params(struct bnxt_coal
*hw_coal
,
4620 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input
*req
)
4624 req
->num_cmpl_aggr_int
= rte_cpu_to_le_16(hw_coal
->num_cmpl_aggr_int
);
4626 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
4627 req
->num_cmpl_dma_aggr
= rte_cpu_to_le_16(hw_coal
->num_cmpl_dma_aggr
);
4629 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
4630 req
->num_cmpl_dma_aggr_during_int
=
4631 rte_cpu_to_le_16(hw_coal
->num_cmpl_dma_aggr_during_int
);
4633 req
->int_lat_tmr_max
= rte_cpu_to_le_16(hw_coal
->int_lat_tmr_max
);
4635 /* min timer set to 1/2 of interrupt timer */
4636 req
->int_lat_tmr_min
= rte_cpu_to_le_16(hw_coal
->int_lat_tmr_min
);
4638 /* buf timer set to 1/4 of interrupt timer */
4639 req
->cmpl_aggr_dma_tmr
= rte_cpu_to_le_16(hw_coal
->cmpl_aggr_dma_tmr
);
4641 req
->cmpl_aggr_dma_tmr_during_int
=
4642 rte_cpu_to_le_16(hw_coal
->cmpl_aggr_dma_tmr_during_int
);
4644 flags
= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET
|
4645 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE
;
4646 req
->flags
= rte_cpu_to_le_16(flags
);
4649 static int bnxt_hwrm_set_coal_params_thor(struct bnxt
*bp
,
4650 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input
*agg_req
)
4652 struct hwrm_ring_aggint_qcaps_input req
= {0};
4653 struct hwrm_ring_aggint_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4658 HWRM_PREP(&req
, HWRM_RING_AGGINT_QCAPS
, BNXT_USE_CHIMP_MB
);
4659 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4660 HWRM_CHECK_RESULT();
4662 agg_req
->num_cmpl_dma_aggr
= resp
->num_cmpl_dma_aggr_max
;
4663 agg_req
->cmpl_aggr_dma_tmr
= resp
->cmpl_aggr_dma_tmr_min
;
4665 flags
= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET
|
4666 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE
;
4667 agg_req
->flags
= rte_cpu_to_le_16(flags
);
4669 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR
|
4670 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR
;
4671 agg_req
->enables
= rte_cpu_to_le_32(enables
);
4677 int bnxt_hwrm_set_ring_coal(struct bnxt
*bp
,
4678 struct bnxt_coal
*coal
, uint16_t ring_id
)
4680 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req
= {0};
4681 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output
*resp
=
4682 bp
->hwrm_cmd_resp_addr
;
4685 /* Set ring coalesce parameters only for 100G NICs */
4686 if (BNXT_CHIP_THOR(bp
)) {
4687 if (bnxt_hwrm_set_coal_params_thor(bp
, &req
))
4689 } else if (bnxt_stratus_device(bp
)) {
4690 bnxt_hwrm_set_coal_params(coal
, &req
);
4696 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS
,
4698 req
.ring_id
= rte_cpu_to_le_16(ring_id
);
4699 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4700 HWRM_CHECK_RESULT();
4705 #define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4706 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt
*bp
)
4708 struct hwrm_func_backing_store_qcaps_input req
= {0};
4709 struct hwrm_func_backing_store_qcaps_output
*resp
=
4710 bp
->hwrm_cmd_resp_addr
;
4711 struct bnxt_ctx_pg_info
*ctx_pg
;
4712 struct bnxt_ctx_mem_info
*ctx
;
4713 int total_alloc_len
;
4714 int rc
, i
, tqm_rings
;
4716 if (!BNXT_CHIP_THOR(bp
) ||
4717 bp
->hwrm_spec_code
< HWRM_VERSION_1_9_2
||
4722 HWRM_PREP(&req
, HWRM_FUNC_BACKING_STORE_QCAPS
, BNXT_USE_CHIMP_MB
);
4723 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4724 HWRM_CHECK_RESULT_SILENT();
4726 total_alloc_len
= sizeof(*ctx
);
4727 ctx
= rte_zmalloc("bnxt_ctx_mem", total_alloc_len
,
4728 RTE_CACHE_LINE_SIZE
);
4734 ctx
->qp_max_entries
= rte_le_to_cpu_32(resp
->qp_max_entries
);
4735 ctx
->qp_min_qp1_entries
=
4736 rte_le_to_cpu_16(resp
->qp_min_qp1_entries
);
4737 ctx
->qp_max_l2_entries
=
4738 rte_le_to_cpu_16(resp
->qp_max_l2_entries
);
4739 ctx
->qp_entry_size
= rte_le_to_cpu_16(resp
->qp_entry_size
);
4740 ctx
->srq_max_l2_entries
=
4741 rte_le_to_cpu_16(resp
->srq_max_l2_entries
);
4742 ctx
->srq_max_entries
= rte_le_to_cpu_32(resp
->srq_max_entries
);
4743 ctx
->srq_entry_size
= rte_le_to_cpu_16(resp
->srq_entry_size
);
4744 ctx
->cq_max_l2_entries
=
4745 rte_le_to_cpu_16(resp
->cq_max_l2_entries
);
4746 ctx
->cq_max_entries
= rte_le_to_cpu_32(resp
->cq_max_entries
);
4747 ctx
->cq_entry_size
= rte_le_to_cpu_16(resp
->cq_entry_size
);
4748 ctx
->vnic_max_vnic_entries
=
4749 rte_le_to_cpu_16(resp
->vnic_max_vnic_entries
);
4750 ctx
->vnic_max_ring_table_entries
=
4751 rte_le_to_cpu_16(resp
->vnic_max_ring_table_entries
);
4752 ctx
->vnic_entry_size
= rte_le_to_cpu_16(resp
->vnic_entry_size
);
4753 ctx
->stat_max_entries
=
4754 rte_le_to_cpu_32(resp
->stat_max_entries
);
4755 ctx
->stat_entry_size
= rte_le_to_cpu_16(resp
->stat_entry_size
);
4756 ctx
->tqm_entry_size
= rte_le_to_cpu_16(resp
->tqm_entry_size
);
4757 ctx
->tqm_min_entries_per_ring
=
4758 rte_le_to_cpu_32(resp
->tqm_min_entries_per_ring
);
4759 ctx
->tqm_max_entries_per_ring
=
4760 rte_le_to_cpu_32(resp
->tqm_max_entries_per_ring
);
4761 ctx
->tqm_entries_multiple
= resp
->tqm_entries_multiple
;
4762 if (!ctx
->tqm_entries_multiple
)
4763 ctx
->tqm_entries_multiple
= 1;
4764 ctx
->mrav_max_entries
=
4765 rte_le_to_cpu_32(resp
->mrav_max_entries
);
4766 ctx
->mrav_entry_size
= rte_le_to_cpu_16(resp
->mrav_entry_size
);
4767 ctx
->tim_entry_size
= rte_le_to_cpu_16(resp
->tim_entry_size
);
4768 ctx
->tim_max_entries
= rte_le_to_cpu_32(resp
->tim_max_entries
);
4769 ctx
->tqm_fp_rings_count
= resp
->tqm_fp_rings_count
;
4771 if (!ctx
->tqm_fp_rings_count
)
4772 ctx
->tqm_fp_rings_count
= bp
->max_q
;
4774 tqm_rings
= ctx
->tqm_fp_rings_count
+ 1;
4776 ctx_pg
= rte_malloc("bnxt_ctx_pg_mem",
4777 sizeof(*ctx_pg
) * tqm_rings
,
4778 RTE_CACHE_LINE_SIZE
);
4783 for (i
= 0; i
< tqm_rings
; i
++, ctx_pg
++)
4784 ctx
->tqm_mem
[i
] = ctx_pg
;
4792 int bnxt_hwrm_func_backing_store_cfg(struct bnxt
*bp
, uint32_t enables
)
4794 struct hwrm_func_backing_store_cfg_input req
= {0};
4795 struct hwrm_func_backing_store_cfg_output
*resp
=
4796 bp
->hwrm_cmd_resp_addr
;
4797 struct bnxt_ctx_mem_info
*ctx
= bp
->ctx
;
4798 struct bnxt_ctx_pg_info
*ctx_pg
;
4799 uint32_t *num_entries
;
4808 HWRM_PREP(&req
, HWRM_FUNC_BACKING_STORE_CFG
, BNXT_USE_CHIMP_MB
);
4809 req
.enables
= rte_cpu_to_le_32(enables
);
4811 if (enables
& HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP
) {
4812 ctx_pg
= &ctx
->qp_mem
;
4813 req
.qp_num_entries
= rte_cpu_to_le_32(ctx_pg
->entries
);
4814 req
.qp_num_qp1_entries
=
4815 rte_cpu_to_le_16(ctx
->qp_min_qp1_entries
);
4816 req
.qp_num_l2_entries
=
4817 rte_cpu_to_le_16(ctx
->qp_max_l2_entries
);
4818 req
.qp_entry_size
= rte_cpu_to_le_16(ctx
->qp_entry_size
);
4819 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
,
4820 &req
.qpc_pg_size_qpc_lvl
,
4824 if (enables
& HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ
) {
4825 ctx_pg
= &ctx
->srq_mem
;
4826 req
.srq_num_entries
= rte_cpu_to_le_32(ctx_pg
->entries
);
4827 req
.srq_num_l2_entries
=
4828 rte_cpu_to_le_16(ctx
->srq_max_l2_entries
);
4829 req
.srq_entry_size
= rte_cpu_to_le_16(ctx
->srq_entry_size
);
4830 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
,
4831 &req
.srq_pg_size_srq_lvl
,
4835 if (enables
& HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ
) {
4836 ctx_pg
= &ctx
->cq_mem
;
4837 req
.cq_num_entries
= rte_cpu_to_le_32(ctx_pg
->entries
);
4838 req
.cq_num_l2_entries
=
4839 rte_cpu_to_le_16(ctx
->cq_max_l2_entries
);
4840 req
.cq_entry_size
= rte_cpu_to_le_16(ctx
->cq_entry_size
);
4841 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
,
4842 &req
.cq_pg_size_cq_lvl
,
4846 if (enables
& HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC
) {
4847 ctx_pg
= &ctx
->vnic_mem
;
4848 req
.vnic_num_vnic_entries
=
4849 rte_cpu_to_le_16(ctx
->vnic_max_vnic_entries
);
4850 req
.vnic_num_ring_table_entries
=
4851 rte_cpu_to_le_16(ctx
->vnic_max_ring_table_entries
);
4852 req
.vnic_entry_size
= rte_cpu_to_le_16(ctx
->vnic_entry_size
);
4853 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
,
4854 &req
.vnic_pg_size_vnic_lvl
,
4855 &req
.vnic_page_dir
);
4858 if (enables
& HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT
) {
4859 ctx_pg
= &ctx
->stat_mem
;
4860 req
.stat_num_entries
= rte_cpu_to_le_16(ctx
->stat_max_entries
);
4861 req
.stat_entry_size
= rte_cpu_to_le_16(ctx
->stat_entry_size
);
4862 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
,
4863 &req
.stat_pg_size_stat_lvl
,
4864 &req
.stat_page_dir
);
4867 req
.tqm_entry_size
= rte_cpu_to_le_16(ctx
->tqm_entry_size
);
4868 num_entries
= &req
.tqm_sp_num_entries
;
4869 pg_attr
= &req
.tqm_sp_pg_size_tqm_sp_lvl
;
4870 pg_dir
= &req
.tqm_sp_page_dir
;
4871 ena
= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP
;
4872 for (i
= 0; i
< 9; i
++, num_entries
++, pg_attr
++, pg_dir
++, ena
<<= 1) {
4873 if (!(enables
& ena
))
4876 req
.tqm_entry_size
= rte_cpu_to_le_16(ctx
->tqm_entry_size
);
4878 ctx_pg
= ctx
->tqm_mem
[i
];
4879 *num_entries
= rte_cpu_to_le_16(ctx_pg
->entries
);
4880 bnxt_hwrm_set_pg_attr(&ctx_pg
->ring_mem
, pg_attr
, pg_dir
);
4883 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4884 HWRM_CHECK_RESULT();
4890 int bnxt_hwrm_ext_port_qstats(struct bnxt
*bp
)
4892 struct hwrm_port_qstats_ext_input req
= {0};
4893 struct hwrm_port_qstats_ext_output
*resp
= bp
->hwrm_cmd_resp_addr
;
4894 struct bnxt_pf_info
*pf
= bp
->pf
;
4897 if (!(bp
->flags
& BNXT_FLAG_EXT_RX_PORT_STATS
||
4898 bp
->flags
& BNXT_FLAG_EXT_TX_PORT_STATS
))
4901 HWRM_PREP(&req
, HWRM_PORT_QSTATS_EXT
, BNXT_USE_CHIMP_MB
);
4903 req
.port_id
= rte_cpu_to_le_16(pf
->port_id
);
4904 if (bp
->flags
& BNXT_FLAG_EXT_TX_PORT_STATS
) {
4905 req
.tx_stat_host_addr
=
4906 rte_cpu_to_le_64(bp
->hw_tx_port_stats_ext_map
);
4908 rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext
));
4910 if (bp
->flags
& BNXT_FLAG_EXT_RX_PORT_STATS
) {
4911 req
.rx_stat_host_addr
=
4912 rte_cpu_to_le_64(bp
->hw_rx_port_stats_ext_map
);
4914 rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext
));
4916 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4919 bp
->fw_rx_port_stats_ext_size
= 0;
4920 bp
->fw_tx_port_stats_ext_size
= 0;
4922 bp
->fw_rx_port_stats_ext_size
=
4923 rte_le_to_cpu_16(resp
->rx_stat_size
);
4924 bp
->fw_tx_port_stats_ext_size
=
4925 rte_le_to_cpu_16(resp
->tx_stat_size
);
4928 HWRM_CHECK_RESULT();
4935 bnxt_hwrm_tunnel_redirect(struct bnxt
*bp
, uint8_t type
)
4937 struct hwrm_cfa_redirect_tunnel_type_alloc_input req
= {0};
4938 struct hwrm_cfa_redirect_tunnel_type_alloc_output
*resp
=
4939 bp
->hwrm_cmd_resp_addr
;
4942 HWRM_PREP(&req
, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC
, BNXT_USE_CHIMP_MB
);
4943 req
.tunnel_type
= type
;
4944 req
.dest_fid
= bp
->fw_fid
;
4945 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4946 HWRM_CHECK_RESULT();
4954 bnxt_hwrm_tunnel_redirect_free(struct bnxt
*bp
, uint8_t type
)
4956 struct hwrm_cfa_redirect_tunnel_type_free_input req
= {0};
4957 struct hwrm_cfa_redirect_tunnel_type_free_output
*resp
=
4958 bp
->hwrm_cmd_resp_addr
;
4961 HWRM_PREP(&req
, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE
, BNXT_USE_CHIMP_MB
);
4962 req
.tunnel_type
= type
;
4963 req
.dest_fid
= bp
->fw_fid
;
4964 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4965 HWRM_CHECK_RESULT();
4972 int bnxt_hwrm_tunnel_redirect_query(struct bnxt
*bp
, uint32_t *type
)
4974 struct hwrm_cfa_redirect_query_tunnel_type_input req
= {0};
4975 struct hwrm_cfa_redirect_query_tunnel_type_output
*resp
=
4976 bp
->hwrm_cmd_resp_addr
;
4979 HWRM_PREP(&req
, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE
, BNXT_USE_CHIMP_MB
);
4980 req
.src_fid
= bp
->fw_fid
;
4981 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
4982 HWRM_CHECK_RESULT();
4985 *type
= rte_le_to_cpu_32(resp
->tunnel_mask
);
4992 int bnxt_hwrm_tunnel_redirect_info(struct bnxt
*bp
, uint8_t tun_type
,
4995 struct hwrm_cfa_redirect_tunnel_type_info_input req
= {0};
4996 struct hwrm_cfa_redirect_tunnel_type_info_output
*resp
=
4997 bp
->hwrm_cmd_resp_addr
;
5000 HWRM_PREP(&req
, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO
, BNXT_USE_CHIMP_MB
);
5001 req
.src_fid
= bp
->fw_fid
;
5002 req
.tunnel_type
= tun_type
;
5003 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
5004 HWRM_CHECK_RESULT();
5007 *dst_fid
= rte_le_to_cpu_16(resp
->dest_fid
);
5009 PMD_DRV_LOG(DEBUG
, "dst_fid: %x\n", resp
->dest_fid
);
5016 int bnxt_hwrm_set_mac(struct bnxt
*bp
)
5018 struct hwrm_func_vf_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5019 struct hwrm_func_vf_cfg_input req
= {0};
5025 HWRM_PREP(&req
, HWRM_FUNC_VF_CFG
, BNXT_USE_CHIMP_MB
);
5028 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR
);
5029 memcpy(req
.dflt_mac_addr
, bp
->mac_addr
, RTE_ETHER_ADDR_LEN
);
5031 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
5033 HWRM_CHECK_RESULT();
5040 int bnxt_hwrm_if_change(struct bnxt
*bp
, bool up
)
5042 struct hwrm_func_drv_if_change_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5043 struct hwrm_func_drv_if_change_input req
= {0};
5047 if (!(bp
->fw_cap
& BNXT_FW_CAP_IF_CHANGE
))
5050 /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5051 * If we issue FUNC_DRV_IF_CHANGE with flags down before
5052 * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR
5054 if (!up
&& (bp
->flags
& BNXT_FLAG_FW_RESET
))
5057 HWRM_PREP(&req
, HWRM_FUNC_DRV_IF_CHANGE
, BNXT_USE_CHIMP_MB
);
5061 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP
);
5063 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
5065 HWRM_CHECK_RESULT();
5066 flags
= rte_le_to_cpu_32(resp
->flags
);
5072 if (flags
& HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE
) {
5073 PMD_DRV_LOG(INFO
, "FW reset happened while port was down\n");
5074 bp
->flags
|= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE
;
5080 int bnxt_hwrm_error_recovery_qcfg(struct bnxt
*bp
)
5082 struct hwrm_error_recovery_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5083 struct bnxt_error_recovery_info
*info
= bp
->recovery_info
;
5084 struct hwrm_error_recovery_qcfg_input req
= {0};
5089 /* Older FW does not have error recovery support */
5090 if (!(bp
->fw_cap
& BNXT_FW_CAP_ERROR_RECOVERY
))
5093 HWRM_PREP(&req
, HWRM_ERROR_RECOVERY_QCFG
, BNXT_USE_CHIMP_MB
);
5095 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
5097 HWRM_CHECK_RESULT();
5099 flags
= rte_le_to_cpu_32(resp
->flags
);
5100 if (flags
& HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST
)
5101 info
->flags
|= BNXT_FLAG_ERROR_RECOVERY_HOST
;
5102 else if (flags
& HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU
)
5103 info
->flags
|= BNXT_FLAG_ERROR_RECOVERY_CO_CPU
;
5105 if ((info
->flags
& BNXT_FLAG_ERROR_RECOVERY_CO_CPU
) &&
5106 !(bp
->flags
& BNXT_FLAG_KONG_MB_EN
)) {
5111 /* FW returned values are in units of 100msec */
5112 info
->driver_polling_freq
=
5113 rte_le_to_cpu_32(resp
->driver_polling_freq
) * 100;
5114 info
->master_func_wait_period
=
5115 rte_le_to_cpu_32(resp
->master_func_wait_period
) * 100;
5116 info
->normal_func_wait_period
=
5117 rte_le_to_cpu_32(resp
->normal_func_wait_period
) * 100;
5118 info
->master_func_wait_period_after_reset
=
5119 rte_le_to_cpu_32(resp
->master_func_wait_period_after_reset
) * 100;
5120 info
->max_bailout_time_after_reset
=
5121 rte_le_to_cpu_32(resp
->max_bailout_time_after_reset
) * 100;
5122 info
->status_regs
[BNXT_FW_STATUS_REG
] =
5123 rte_le_to_cpu_32(resp
->fw_health_status_reg
);
5124 info
->status_regs
[BNXT_FW_HEARTBEAT_CNT_REG
] =
5125 rte_le_to_cpu_32(resp
->fw_heartbeat_reg
);
5126 info
->status_regs
[BNXT_FW_RECOVERY_CNT_REG
] =
5127 rte_le_to_cpu_32(resp
->fw_reset_cnt_reg
);
5128 info
->status_regs
[BNXT_FW_RESET_INPROG_REG
] =
5129 rte_le_to_cpu_32(resp
->reset_inprogress_reg
);
5130 info
->reg_array_cnt
=
5131 rte_le_to_cpu_32(resp
->reg_array_cnt
);
5133 if (info
->reg_array_cnt
>= BNXT_NUM_RESET_REG
) {
5138 for (i
= 0; i
< info
->reg_array_cnt
; i
++) {
5139 info
->reset_reg
[i
] =
5140 rte_le_to_cpu_32(resp
->reset_reg
[i
]);
5141 info
->reset_reg_val
[i
] =
5142 rte_le_to_cpu_32(resp
->reset_reg_val
[i
]);
5143 info
->delay_after_reset
[i
] =
5144 resp
->delay_after_reset
[i
];
5149 /* Map the FW status registers */
5151 rc
= bnxt_map_fw_health_status_regs(bp
);
5154 rte_free(bp
->recovery_info
);
5155 bp
->recovery_info
= NULL
;
5160 int bnxt_hwrm_fw_reset(struct bnxt
*bp
)
5162 struct hwrm_fw_reset_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5163 struct hwrm_fw_reset_input req
= {0};
5169 HWRM_PREP(&req
, HWRM_FW_RESET
, BNXT_USE_KONG(bp
));
5171 req
.embedded_proc_type
=
5172 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP
;
5173 req
.selfrst_status
=
5174 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP
;
5175 req
.flags
= HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL
;
5177 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
),
5180 HWRM_CHECK_RESULT();
5186 int bnxt_hwrm_port_ts_query(struct bnxt
*bp
, uint8_t path
, uint64_t *timestamp
)
5188 struct hwrm_port_ts_query_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5189 struct hwrm_port_ts_query_input req
= {0};
5190 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
5197 HWRM_PREP(&req
, HWRM_PORT_TS_QUERY
, BNXT_USE_CHIMP_MB
);
5200 case BNXT_PTP_FLAGS_PATH_TX
:
5201 flags
|= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX
;
5203 case BNXT_PTP_FLAGS_PATH_RX
:
5204 flags
|= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX
;
5206 case BNXT_PTP_FLAGS_CURRENT_TIME
:
5207 flags
|= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME
;
5211 req
.flags
= rte_cpu_to_le_32(flags
);
5212 req
.port_id
= rte_cpu_to_le_16(bp
->pf
->port_id
);
5214 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_CHIMP_MB
);
5216 HWRM_CHECK_RESULT();
5219 *timestamp
= rte_le_to_cpu_32(resp
->ptp_msg_ts
[0]);
5221 (uint64_t)(rte_le_to_cpu_32(resp
->ptp_msg_ts
[1])) << 32;
5228 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt
*bp
)
5230 struct hwrm_cfa_adv_flow_mgnt_qcaps_output
*resp
=
5231 bp
->hwrm_cmd_resp_addr
;
5232 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req
= {0};
5236 if (!(bp
->fw_cap
& BNXT_FW_CAP_ADV_FLOW_MGMT
))
5239 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5241 "Not a PF or trusted VF. Command not supported\n");
5245 HWRM_PREP(&req
, HWRM_CFA_ADV_FLOW_MGNT_QCAPS
, BNXT_USE_KONG(bp
));
5246 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5248 HWRM_CHECK_RESULT();
5249 flags
= rte_le_to_cpu_32(resp
->flags
);
5252 if (flags
& HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN
) {
5253 bp
->flow_flags
|= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN
;
5254 PMD_DRV_LOG(INFO
, "Source L2 header filtering enabled\n");
5260 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt
*bp
, uint16_t *max_fc
)
5264 struct hwrm_cfa_counter_qcaps_input req
= {0};
5265 struct hwrm_cfa_counter_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5267 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5269 "Not a PF or trusted VF. Command not supported\n");
5273 HWRM_PREP(&req
, HWRM_CFA_COUNTER_QCAPS
, BNXT_USE_KONG(bp
));
5274 req
.target_id
= rte_cpu_to_le_16(bp
->fw_fid
);
5275 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5277 HWRM_CHECK_RESULT();
5279 *max_fc
= rte_le_to_cpu_16(resp
->max_rx_fc
);
5285 int bnxt_hwrm_ctx_rgtr(struct bnxt
*bp
, rte_iova_t dma_addr
, uint16_t *ctx_id
)
5288 struct hwrm_cfa_ctx_mem_rgtr_input req
= {.req_type
= 0 };
5289 struct hwrm_cfa_ctx_mem_rgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5291 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5293 "Not a PF or trusted VF. Command not supported\n");
5297 HWRM_PREP(&req
, HWRM_CFA_CTX_MEM_RGTR
, BNXT_USE_KONG(bp
));
5299 req
.page_level
= HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0
;
5300 req
.page_size
= HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M
;
5301 req
.page_dir
= rte_cpu_to_le_64(dma_addr
);
5303 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5305 HWRM_CHECK_RESULT();
5307 *ctx_id
= rte_le_to_cpu_16(resp
->ctx_id
);
5308 PMD_DRV_LOG(DEBUG
, "ctx_id = %d\n", *ctx_id
);
5315 int bnxt_hwrm_ctx_unrgtr(struct bnxt
*bp
, uint16_t ctx_id
)
5318 struct hwrm_cfa_ctx_mem_unrgtr_input req
= {.req_type
= 0 };
5319 struct hwrm_cfa_ctx_mem_unrgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5321 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5323 "Not a PF or trusted VF. Command not supported\n");
5327 HWRM_PREP(&req
, HWRM_CFA_CTX_MEM_UNRGTR
, BNXT_USE_KONG(bp
));
5329 req
.ctx_id
= rte_cpu_to_le_16(ctx_id
);
5331 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5333 HWRM_CHECK_RESULT();
5339 int bnxt_hwrm_cfa_counter_cfg(struct bnxt
*bp
, enum bnxt_flow_dir dir
,
5340 uint16_t cntr
, uint16_t ctx_id
,
5341 uint32_t num_entries
, bool enable
)
5343 struct hwrm_cfa_counter_cfg_input req
= {0};
5344 struct hwrm_cfa_counter_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5348 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5350 "Not a PF or trusted VF. Command not supported\n");
5354 HWRM_PREP(&req
, HWRM_CFA_COUNTER_CFG
, BNXT_USE_KONG(bp
));
5356 req
.target_id
= rte_cpu_to_le_16(bp
->fw_fid
);
5357 req
.counter_type
= rte_cpu_to_le_16(cntr
);
5358 flags
= enable
? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE
:
5359 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE
;
5360 flags
|= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL
;
5361 if (dir
== BNXT_DIR_RX
)
5362 flags
|= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX
;
5363 else if (dir
== BNXT_DIR_TX
)
5364 flags
|= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX
;
5365 req
.flags
= rte_cpu_to_le_16(flags
);
5366 req
.ctx_id
= rte_cpu_to_le_16(ctx_id
);
5367 req
.num_entries
= rte_cpu_to_le_32(num_entries
);
5369 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5370 HWRM_CHECK_RESULT();
5376 int bnxt_hwrm_cfa_counter_qstats(struct bnxt
*bp
,
5377 enum bnxt_flow_dir dir
,
5379 uint16_t num_entries
)
5381 struct hwrm_cfa_counter_qstats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
5382 struct hwrm_cfa_counter_qstats_input req
= {0};
5383 uint16_t flow_ctx_id
= 0;
5387 if (!(BNXT_PF(bp
) || BNXT_VF_IS_TRUSTED(bp
))) {
5389 "Not a PF or trusted VF. Command not supported\n");
5393 if (dir
== BNXT_DIR_RX
) {
5394 flow_ctx_id
= bp
->flow_stat
->rx_fc_in_tbl
.ctx_id
;
5395 flags
= HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX
;
5396 } else if (dir
== BNXT_DIR_TX
) {
5397 flow_ctx_id
= bp
->flow_stat
->tx_fc_in_tbl
.ctx_id
;
5398 flags
= HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX
;
5401 HWRM_PREP(&req
, HWRM_CFA_COUNTER_QSTATS
, BNXT_USE_KONG(bp
));
5402 req
.target_id
= rte_cpu_to_le_16(bp
->fw_fid
);
5403 req
.counter_type
= rte_cpu_to_le_16(cntr
);
5404 req
.input_flow_ctx_id
= rte_cpu_to_le_16(flow_ctx_id
);
5405 req
.num_entries
= rte_cpu_to_le_16(num_entries
);
5406 req
.flags
= rte_cpu_to_le_16(flags
);
5407 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
), BNXT_USE_KONG(bp
));
5409 HWRM_CHECK_RESULT();