4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
47 #include "bnxt_ring.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
/* Number of valid-bit poll iterations before a HWRM command is timed out. */
#define HWRM_CMD_TIMEOUT		2000
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
 * command was failed by the ChiMP.
 */
62 static int bnxt_hwrm_send_message_locked(struct bnxt
*bp
, void *msg
,
66 struct input
*req
= msg
;
67 struct output
*resp
= bp
->hwrm_cmd_resp_addr
;
72 /* Write request msg to hwrm channel */
73 for (i
= 0; i
< msg_len
; i
+= 4) {
74 bar
= (uint8_t *)bp
->bar0
+ i
;
75 *(volatile uint32_t *)bar
= *data
;
79 /* Zero the rest of the request space */
80 for (; i
< bp
->max_req_len
; i
+= 4) {
81 bar
= (uint8_t *)bp
->bar0
+ i
;
82 *(volatile uint32_t *)bar
= 0;
85 /* Ring channel doorbell */
86 bar
= (uint8_t *)bp
->bar0
+ 0x100;
87 *(volatile uint32_t *)bar
= 1;
89 /* Poll for the valid bit */
90 for (i
= 0; i
< HWRM_CMD_TIMEOUT
; i
++) {
91 /* Sanity check on the resp->resp_len */
93 if (resp
->resp_len
&& resp
->resp_len
<=
95 /* Last byte of resp contains the valid key */
96 valid
= (uint8_t *)resp
+ resp
->resp_len
- 1;
97 if (*valid
== HWRM_RESP_VALID_KEY
)
103 if (i
>= HWRM_CMD_TIMEOUT
) {
104 RTE_LOG(ERR
, PMD
, "Error sending msg %x\n",
114 static int bnxt_hwrm_send_message(struct bnxt
*bp
, void *msg
, uint32_t msg_len
)
118 rte_spinlock_lock(&bp
->hwrm_lock
);
119 rc
= bnxt_hwrm_send_message_locked(bp
, msg
, msg_len
);
120 rte_spinlock_unlock(&bp
->hwrm_lock
);
/*
 * Initialize a HWRM request: clear the shared response buffer and fill the
 * common input header (type, completion ring, sequence id, target, resp DMA).
 * Expects `bp` in scope at the expansion site.
 */
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
/*
 * Check the outcome of a HWRM command: returns from the enclosing function
 * with the send error, or with the firmware error_code if the ChiMP failed
 * the command.  Expects `rc` and `resp` in scope at the expansion site.
 */
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}
146 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
149 struct hwrm_cfa_l2_set_rx_mask_input req
= {.req_type
= 0 };
150 struct hwrm_cfa_l2_set_rx_mask_output
*resp
= bp
->hwrm_cmd_resp_addr
;
152 HWRM_PREP(req
, CFA_L2_SET_RX_MASK
, -1, resp
);
153 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
156 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
166 struct hwrm_cfa_l2_set_rx_mask_input req
= {.req_type
= 0 };
167 struct hwrm_cfa_l2_set_rx_mask_output
*resp
= bp
->hwrm_cmd_resp_addr
;
170 HWRM_PREP(req
, CFA_L2_SET_RX_MASK
, -1, resp
);
171 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
173 /* FIXME add multicast flag, when multicast adding options is supported
176 if (vnic
->flags
& BNXT_VNIC_INFO_PROMISC
)
177 mask
= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS
;
178 if (vnic
->flags
& BNXT_VNIC_INFO_ALLMULTI
)
179 mask
= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST
;
180 req
.mask
= rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
|
183 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
190 int bnxt_hwrm_clear_filter(struct bnxt
*bp
,
191 struct bnxt_filter_info
*filter
)
194 struct hwrm_cfa_l2_filter_free_input req
= {.req_type
= 0 };
195 struct hwrm_cfa_l2_filter_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
197 HWRM_PREP(req
, CFA_L2_FILTER_FREE
, -1, resp
);
199 req
.l2_filter_id
= rte_cpu_to_le_64(filter
->fw_l2_filter_id
);
201 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
205 filter
->fw_l2_filter_id
= -1;
210 int bnxt_hwrm_set_filter(struct bnxt
*bp
,
211 struct bnxt_vnic_info
*vnic
,
212 struct bnxt_filter_info
*filter
)
215 struct hwrm_cfa_l2_filter_alloc_input req
= {.req_type
= 0 };
216 struct hwrm_cfa_l2_filter_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
217 uint32_t enables
= 0;
219 HWRM_PREP(req
, CFA_L2_FILTER_ALLOC
, -1, resp
);
221 req
.flags
= rte_cpu_to_le_32(filter
->flags
);
223 enables
= filter
->enables
|
224 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID
;
225 req
.dst_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
228 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
)
229 memcpy(req
.l2_addr
, filter
->l2_addr
,
232 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
)
233 memcpy(req
.l2_addr_mask
, filter
->l2_addr_mask
,
236 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN
)
237 req
.l2_ovlan
= filter
->l2_ovlan
;
239 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK
)
240 req
.l2_ovlan_mask
= filter
->l2_ovlan_mask
;
242 req
.enables
= rte_cpu_to_le_32(enables
);
244 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
248 filter
->fw_l2_filter_id
= rte_le_to_cpu_64(resp
->l2_filter_id
);
253 int bnxt_hwrm_exec_fwd_resp(struct bnxt
*bp
, void *fwd_cmd
)
256 struct hwrm_exec_fwd_resp_input req
= {.req_type
= 0 };
257 struct hwrm_exec_fwd_resp_output
*resp
= bp
->hwrm_cmd_resp_addr
;
259 HWRM_PREP(req
, EXEC_FWD_RESP
, -1, resp
);
261 memcpy(req
.encap_request
, fwd_cmd
,
262 sizeof(req
.encap_request
));
264 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
271 int bnxt_hwrm_func_qcaps(struct bnxt
*bp
)
274 struct hwrm_func_qcaps_input req
= {.req_type
= 0 };
275 struct hwrm_func_qcaps_output
*resp
= bp
->hwrm_cmd_resp_addr
;
277 HWRM_PREP(req
, FUNC_QCAPS
, -1, resp
);
279 req
.fid
= rte_cpu_to_le_16(0xffff);
281 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
285 bp
->max_ring_grps
= rte_le_to_cpu_32(resp
->max_hw_ring_grps
);
287 struct bnxt_pf_info
*pf
= &bp
->pf
;
289 pf
->fw_fid
= rte_le_to_cpu_32(resp
->fid
);
290 pf
->port_id
= resp
->port_id
;
291 memcpy(pf
->mac_addr
, resp
->mac_address
, ETHER_ADDR_LEN
);
292 pf
->max_rsscos_ctx
= rte_le_to_cpu_16(resp
->max_rsscos_ctx
);
293 pf
->max_cp_rings
= rte_le_to_cpu_16(resp
->max_cmpl_rings
);
294 pf
->max_tx_rings
= rte_le_to_cpu_16(resp
->max_tx_rings
);
295 pf
->max_rx_rings
= rte_le_to_cpu_16(resp
->max_rx_rings
);
296 pf
->max_l2_ctx
= rte_le_to_cpu_16(resp
->max_l2_ctxs
);
297 pf
->max_vnics
= rte_le_to_cpu_16(resp
->max_vnics
);
298 pf
->first_vf_id
= rte_le_to_cpu_16(resp
->first_vf_id
);
299 pf
->max_vfs
= rte_le_to_cpu_16(resp
->max_vfs
);
301 struct bnxt_vf_info
*vf
= &bp
->vf
;
303 vf
->fw_fid
= rte_le_to_cpu_32(resp
->fid
);
304 memcpy(vf
->mac_addr
, &resp
->mac_address
, ETHER_ADDR_LEN
);
305 vf
->max_rsscos_ctx
= rte_le_to_cpu_16(resp
->max_rsscos_ctx
);
306 vf
->max_cp_rings
= rte_le_to_cpu_16(resp
->max_cmpl_rings
);
307 vf
->max_tx_rings
= rte_le_to_cpu_16(resp
->max_tx_rings
);
308 vf
->max_rx_rings
= rte_le_to_cpu_16(resp
->max_rx_rings
);
309 vf
->max_l2_ctx
= rte_le_to_cpu_16(resp
->max_l2_ctxs
);
310 vf
->max_vnics
= rte_le_to_cpu_16(resp
->max_vnics
);
316 int bnxt_hwrm_func_reset(struct bnxt
*bp
)
319 struct hwrm_func_reset_input req
= {.req_type
= 0 };
320 struct hwrm_func_reset_output
*resp
= bp
->hwrm_cmd_resp_addr
;
322 HWRM_PREP(req
, FUNC_RESET
, -1, resp
);
324 req
.enables
= rte_cpu_to_le_32(0);
326 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
333 int bnxt_hwrm_func_driver_register(struct bnxt
*bp
, uint32_t flags
,
334 uint32_t *vf_req_fwd
)
337 struct hwrm_func_drv_rgtr_input req
= {.req_type
= 0 };
338 struct hwrm_func_drv_rgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
340 if (bp
->flags
& BNXT_FLAG_REGISTERED
)
343 HWRM_PREP(req
, FUNC_DRV_RGTR
, -1, resp
);
345 req
.enables
= HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER
|
346 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD
;
347 req
.ver_maj
= RTE_VER_YEAR
;
348 req
.ver_min
= RTE_VER_MONTH
;
349 req
.ver_upd
= RTE_VER_MINOR
;
351 memcpy(req
.vf_req_fwd
, vf_req_fwd
, sizeof(req
.vf_req_fwd
));
353 req
.async_event_fwd
[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
355 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
359 bp
->flags
|= BNXT_FLAG_REGISTERED
;
364 int bnxt_hwrm_ver_get(struct bnxt
*bp
)
367 struct hwrm_ver_get_input req
= {.req_type
= 0 };
368 struct hwrm_ver_get_output
*resp
= bp
->hwrm_cmd_resp_addr
;
371 uint16_t max_resp_len
;
372 char type
[RTE_MEMZONE_NAMESIZE
];
374 HWRM_PREP(req
, VER_GET
, -1, resp
);
376 req
.hwrm_intf_maj
= HWRM_VERSION_MAJOR
;
377 req
.hwrm_intf_min
= HWRM_VERSION_MINOR
;
378 req
.hwrm_intf_upd
= HWRM_VERSION_UPDATE
;
381 * Hold the lock since we may be adjusting the response pointers.
383 rte_spinlock_lock(&bp
->hwrm_lock
);
384 rc
= bnxt_hwrm_send_message_locked(bp
, &req
, sizeof(req
));
388 RTE_LOG(INFO
, PMD
, "%d.%d.%d:%d.%d.%d\n",
389 resp
->hwrm_intf_maj
, resp
->hwrm_intf_min
,
391 resp
->hwrm_fw_maj
, resp
->hwrm_fw_min
, resp
->hwrm_fw_bld
);
392 RTE_LOG(INFO
, PMD
, "Driver HWRM version: %d.%d.%d\n",
393 HWRM_VERSION_MAJOR
, HWRM_VERSION_MINOR
, HWRM_VERSION_UPDATE
);
395 my_version
= HWRM_VERSION_MAJOR
<< 16;
396 my_version
|= HWRM_VERSION_MINOR
<< 8;
397 my_version
|= HWRM_VERSION_UPDATE
;
399 fw_version
= resp
->hwrm_intf_maj
<< 16;
400 fw_version
|= resp
->hwrm_intf_min
<< 8;
401 fw_version
|= resp
->hwrm_intf_upd
;
403 if (resp
->hwrm_intf_maj
!= HWRM_VERSION_MAJOR
) {
404 RTE_LOG(ERR
, PMD
, "Unsupported firmware API version\n");
409 if (my_version
!= fw_version
) {
410 RTE_LOG(INFO
, PMD
, "BNXT Driver/HWRM API mismatch.\n");
411 if (my_version
< fw_version
) {
413 "Firmware API version is newer than driver.\n");
415 "The driver may be missing features.\n");
418 "Firmware API version is older than driver.\n");
420 "Not all driver features may be functional.\n");
424 if (bp
->max_req_len
> resp
->max_req_win_len
) {
425 RTE_LOG(ERR
, PMD
, "Unsupported request length\n");
428 bp
->max_req_len
= resp
->max_req_win_len
;
429 max_resp_len
= resp
->max_resp_len
;
430 if (bp
->max_resp_len
!= max_resp_len
) {
431 sprintf(type
, "bnxt_hwrm_%04x:%02x:%02x:%02x",
432 bp
->pdev
->addr
.domain
, bp
->pdev
->addr
.bus
,
433 bp
->pdev
->addr
.devid
, bp
->pdev
->addr
.function
);
435 rte_free(bp
->hwrm_cmd_resp_addr
);
437 bp
->hwrm_cmd_resp_addr
= rte_malloc(type
, max_resp_len
, 0);
438 if (bp
->hwrm_cmd_resp_addr
== NULL
) {
442 bp
->hwrm_cmd_resp_dma_addr
=
443 rte_malloc_virt2phy(bp
->hwrm_cmd_resp_addr
);
444 bp
->max_resp_len
= max_resp_len
;
448 rte_spinlock_unlock(&bp
->hwrm_lock
);
452 int bnxt_hwrm_func_driver_unregister(struct bnxt
*bp
, uint32_t flags
)
455 struct hwrm_func_drv_unrgtr_input req
= {.req_type
= 0 };
456 struct hwrm_func_drv_unrgtr_output
*resp
= bp
->hwrm_cmd_resp_addr
;
458 if (!(bp
->flags
& BNXT_FLAG_REGISTERED
))
461 HWRM_PREP(req
, FUNC_DRV_UNRGTR
, -1, resp
);
464 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
468 bp
->flags
&= ~BNXT_FLAG_REGISTERED
;
473 static int bnxt_hwrm_port_phy_cfg(struct bnxt
*bp
, struct bnxt_link_info
*conf
)
476 struct hwrm_port_phy_cfg_input req
= {0};
477 struct hwrm_port_phy_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
478 uint32_t enables
= 0;
480 HWRM_PREP(req
, PORT_PHY_CFG
, -1, resp
);
483 req
.flags
= rte_cpu_to_le_32(conf
->phy_flags
);
484 req
.force_link_speed
= rte_cpu_to_le_16(conf
->link_speed
);
486 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
487 * any auto mode, even "none".
489 if (!conf
->link_speed
) {
490 req
.auto_mode
|= conf
->auto_mode
;
491 enables
= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE
;
492 req
.auto_link_speed_mask
= conf
->auto_link_speed_mask
;
494 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK
;
495 req
.auto_link_speed
= bp
->link_info
.auto_link_speed
;
497 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED
;
499 req
.auto_duplex
= conf
->duplex
;
500 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX
;
501 req
.auto_pause
= conf
->auto_pause
;
502 req
.force_pause
= conf
->force_pause
;
503 /* Set force_pause if there is no auto or if there is a force */
504 if (req
.auto_pause
&& !req
.force_pause
)
505 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE
;
507 enables
|= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE
;
509 req
.enables
= rte_cpu_to_le_32(enables
);
512 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN
);
513 RTE_LOG(INFO
, PMD
, "Force Link Down\n");
516 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
523 static int bnxt_hwrm_port_phy_qcfg(struct bnxt
*bp
,
524 struct bnxt_link_info
*link_info
)
527 struct hwrm_port_phy_qcfg_input req
= {0};
528 struct hwrm_port_phy_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
530 HWRM_PREP(req
, PORT_PHY_QCFG
, -1, resp
);
532 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
536 link_info
->phy_link_status
= resp
->link
;
537 if (link_info
->phy_link_status
!= HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK
) {
538 link_info
->link_up
= 1;
539 link_info
->link_speed
= rte_le_to_cpu_16(resp
->link_speed
);
541 link_info
->link_up
= 0;
542 link_info
->link_speed
= 0;
544 link_info
->duplex
= resp
->duplex
;
545 link_info
->pause
= resp
->pause
;
546 link_info
->auto_pause
= resp
->auto_pause
;
547 link_info
->force_pause
= resp
->force_pause
;
548 link_info
->auto_mode
= resp
->auto_mode
;
550 link_info
->support_speeds
= rte_le_to_cpu_16(resp
->support_speeds
);
551 link_info
->auto_link_speed
= rte_le_to_cpu_16(resp
->auto_link_speed
);
552 link_info
->preemphasis
= rte_le_to_cpu_32(resp
->preemphasis
);
553 link_info
->phy_ver
[0] = resp
->phy_maj
;
554 link_info
->phy_ver
[1] = resp
->phy_min
;
555 link_info
->phy_ver
[2] = resp
->phy_bld
;
560 int bnxt_hwrm_queue_qportcfg(struct bnxt
*bp
)
563 struct hwrm_queue_qportcfg_input req
= {.req_type
= 0 };
564 struct hwrm_queue_qportcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
566 HWRM_PREP(req
, QUEUE_QPORTCFG
, -1, resp
);
568 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
572 #define GET_QUEUE_INFO(x) \
573 bp->cos_queue[x].id = resp->queue_id##x; \
574 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
588 int bnxt_hwrm_ring_alloc(struct bnxt
*bp
,
589 struct bnxt_ring
*ring
,
590 uint32_t ring_type
, uint32_t map_index
,
591 uint32_t stats_ctx_id
)
594 struct hwrm_ring_alloc_input req
= {.req_type
= 0 };
595 struct hwrm_ring_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
597 HWRM_PREP(req
, RING_ALLOC
, -1, resp
);
599 req
.enables
= rte_cpu_to_le_32(0);
601 req
.page_tbl_addr
= rte_cpu_to_le_64(ring
->bd_dma
);
602 req
.fbo
= rte_cpu_to_le_32(0);
603 /* Association of ring index with doorbell index */
604 req
.logical_id
= rte_cpu_to_le_16(map_index
);
607 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX
:
608 req
.queue_id
= bp
->cos_queue
[0].id
;
610 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX
:
611 req
.ring_type
= ring_type
;
613 rte_cpu_to_le_16(bp
->grp_info
[map_index
].cp_fw_ring_id
);
614 req
.length
= rte_cpu_to_le_32(ring
->ring_size
);
615 req
.stat_ctx_id
= rte_cpu_to_le_16(stats_ctx_id
);
616 req
.enables
= rte_cpu_to_le_32(rte_le_to_cpu_32(req
.enables
) |
617 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
);
619 case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL
:
620 req
.ring_type
= ring_type
;
622 * TODO: Some HWRM versions crash with
623 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
625 req
.int_mode
= HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX
;
626 req
.length
= rte_cpu_to_le_32(ring
->ring_size
);
629 RTE_LOG(ERR
, PMD
, "hwrm alloc invalid ring type %d\n",
634 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
636 if (rc
|| resp
->error_code
) {
637 if (rc
== 0 && resp
->error_code
)
638 rc
= rte_le_to_cpu_16(resp
->error_code
);
640 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL
:
642 "hwrm_ring_alloc cp failed. rc:%d\n", rc
);
644 case HWRM_RING_FREE_INPUT_RING_TYPE_RX
:
646 "hwrm_ring_alloc rx failed. rc:%d\n", rc
);
648 case HWRM_RING_FREE_INPUT_RING_TYPE_TX
:
650 "hwrm_ring_alloc tx failed. rc:%d\n", rc
);
653 RTE_LOG(ERR
, PMD
, "Invalid ring. rc:%d\n", rc
);
658 ring
->fw_ring_id
= rte_le_to_cpu_16(resp
->ring_id
);
662 int bnxt_hwrm_ring_free(struct bnxt
*bp
,
663 struct bnxt_ring
*ring
, uint32_t ring_type
)
666 struct hwrm_ring_free_input req
= {.req_type
= 0 };
667 struct hwrm_ring_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
669 HWRM_PREP(req
, RING_FREE
, -1, resp
);
671 req
.ring_type
= ring_type
;
672 req
.ring_id
= rte_cpu_to_le_16(ring
->fw_ring_id
);
674 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
676 if (rc
|| resp
->error_code
) {
677 if (rc
== 0 && resp
->error_code
)
678 rc
= rte_le_to_cpu_16(resp
->error_code
);
681 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL
:
682 RTE_LOG(ERR
, PMD
, "hwrm_ring_free cp failed. rc:%d\n",
685 case HWRM_RING_FREE_INPUT_RING_TYPE_RX
:
686 RTE_LOG(ERR
, PMD
, "hwrm_ring_free rx failed. rc:%d\n",
689 case HWRM_RING_FREE_INPUT_RING_TYPE_TX
:
690 RTE_LOG(ERR
, PMD
, "hwrm_ring_free tx failed. rc:%d\n",
694 RTE_LOG(ERR
, PMD
, "Invalid ring, rc:%d\n", rc
);
701 int bnxt_hwrm_ring_grp_alloc(struct bnxt
*bp
, unsigned int idx
)
704 struct hwrm_ring_grp_alloc_input req
= {.req_type
= 0 };
705 struct hwrm_ring_grp_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
707 HWRM_PREP(req
, RING_GRP_ALLOC
, -1, resp
);
709 req
.cr
= rte_cpu_to_le_16(bp
->grp_info
[idx
].cp_fw_ring_id
);
710 req
.rr
= rte_cpu_to_le_16(bp
->grp_info
[idx
].rx_fw_ring_id
);
711 req
.ar
= rte_cpu_to_le_16(bp
->grp_info
[idx
].ag_fw_ring_id
);
712 req
.sc
= rte_cpu_to_le_16(bp
->grp_info
[idx
].fw_stats_ctx
);
714 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
718 bp
->grp_info
[idx
].fw_grp_id
=
719 rte_le_to_cpu_16(resp
->ring_group_id
);
724 int bnxt_hwrm_ring_grp_free(struct bnxt
*bp
, unsigned int idx
)
727 struct hwrm_ring_grp_free_input req
= {.req_type
= 0 };
728 struct hwrm_ring_grp_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
730 HWRM_PREP(req
, RING_GRP_FREE
, -1, resp
);
732 req
.ring_group_id
= rte_cpu_to_le_16(bp
->grp_info
[idx
].fw_grp_id
);
734 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
738 bp
->grp_info
[idx
].fw_grp_id
= INVALID_HW_RING_ID
;
742 int bnxt_hwrm_stat_clear(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
)
745 struct hwrm_stat_ctx_clr_stats_input req
= {.req_type
= 0 };
746 struct hwrm_stat_ctx_clr_stats_output
*resp
= bp
->hwrm_cmd_resp_addr
;
748 HWRM_PREP(req
, STAT_CTX_CLR_STATS
, -1, resp
);
750 if (cpr
->hw_stats_ctx_id
== (uint32_t)HWRM_NA_SIGNATURE
)
753 req
.stat_ctx_id
= rte_cpu_to_le_16(cpr
->hw_stats_ctx_id
);
754 req
.seq_id
= rte_cpu_to_le_16(bp
->hwrm_cmd_seq
++);
756 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
763 int bnxt_hwrm_stat_ctx_alloc(struct bnxt
*bp
,
764 struct bnxt_cp_ring_info
*cpr
, unsigned int idx
)
767 struct hwrm_stat_ctx_alloc_input req
= {.req_type
= 0 };
768 struct hwrm_stat_ctx_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
770 HWRM_PREP(req
, STAT_CTX_ALLOC
, -1, resp
);
772 req
.update_period_ms
= rte_cpu_to_le_32(1000);
774 req
.seq_id
= rte_cpu_to_le_16(bp
->hwrm_cmd_seq
++);
776 rte_cpu_to_le_64(cpr
->hw_stats_map
);
778 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
782 cpr
->hw_stats_ctx_id
= rte_le_to_cpu_16(resp
->stat_ctx_id
);
783 bp
->grp_info
[idx
].fw_stats_ctx
= cpr
->hw_stats_ctx_id
;
788 int bnxt_hwrm_stat_ctx_free(struct bnxt
*bp
,
789 struct bnxt_cp_ring_info
*cpr
, unsigned int idx
)
792 struct hwrm_stat_ctx_free_input req
= {.req_type
= 0 };
793 struct hwrm_stat_ctx_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
795 HWRM_PREP(req
, STAT_CTX_FREE
, -1, resp
);
797 req
.stat_ctx_id
= rte_cpu_to_le_16(cpr
->hw_stats_ctx_id
);
798 req
.seq_id
= rte_cpu_to_le_16(bp
->hwrm_cmd_seq
++);
800 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
804 cpr
->hw_stats_ctx_id
= HWRM_NA_SIGNATURE
;
805 bp
->grp_info
[idx
].fw_stats_ctx
= cpr
->hw_stats_ctx_id
;
810 int bnxt_hwrm_vnic_alloc(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
813 struct hwrm_vnic_alloc_input req
= { 0 };
814 struct hwrm_vnic_alloc_output
*resp
= bp
->hwrm_cmd_resp_addr
;
816 /* map ring groups to this vnic */
817 for (i
= vnic
->start_grp_id
, j
= 0; i
<= vnic
->end_grp_id
; i
++, j
++) {
818 if (bp
->grp_info
[i
].fw_grp_id
== (uint16_t)HWRM_NA_SIGNATURE
) {
820 "Not enough ring groups avail:%x req:%x\n", j
,
821 (vnic
->end_grp_id
- vnic
->start_grp_id
) + 1);
824 vnic
->fw_grp_ids
[j
] = bp
->grp_info
[i
].fw_grp_id
;
827 vnic
->fw_rss_cos_lb_ctx
= (uint16_t)HWRM_NA_SIGNATURE
;
828 vnic
->ctx_is_rss_cos_lb
= HW_CONTEXT_NONE
;
830 HWRM_PREP(req
, VNIC_ALLOC
, -1, resp
);
832 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
836 vnic
->fw_vnic_id
= rte_le_to_cpu_16(resp
->vnic_id
);
840 int bnxt_hwrm_vnic_cfg(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
843 struct hwrm_vnic_cfg_input req
= {.req_type
= 0 };
844 struct hwrm_vnic_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
846 HWRM_PREP(req
, VNIC_CFG
, -1, resp
);
848 /* Only RSS support for now TBD: COS & LB */
850 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP
|
851 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE
|
852 HWRM_VNIC_CFG_INPUT_ENABLES_MRU
);
853 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
855 rte_cpu_to_le_16(bp
->grp_info
[vnic
->start_grp_id
].fw_grp_id
);
856 req
.rss_rule
= rte_cpu_to_le_16(vnic
->fw_rss_cos_lb_ctx
);
857 req
.cos_rule
= rte_cpu_to_le_16(0xffff);
858 req
.lb_rule
= rte_cpu_to_le_16(0xffff);
859 req
.mru
= rte_cpu_to_le_16(bp
->eth_dev
->data
->mtu
+ ETHER_HDR_LEN
+
860 ETHER_CRC_LEN
+ VLAN_TAG_SIZE
);
861 if (vnic
->func_default
)
863 if (vnic
->vlan_strip
)
865 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE
);
867 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
874 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
877 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req
= {.req_type
= 0 };
878 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output
*resp
=
879 bp
->hwrm_cmd_resp_addr
;
881 HWRM_PREP(req
, VNIC_RSS_COS_LB_CTX_ALLOC
, -1, resp
);
883 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
887 vnic
->fw_rss_cos_lb_ctx
= rte_le_to_cpu_16(resp
->rss_cos_lb_ctx_id
);
892 int bnxt_hwrm_vnic_ctx_free(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
895 struct hwrm_vnic_rss_cos_lb_ctx_free_input req
= {.req_type
= 0 };
896 struct hwrm_vnic_rss_cos_lb_ctx_free_output
*resp
=
897 bp
->hwrm_cmd_resp_addr
;
899 HWRM_PREP(req
, VNIC_RSS_COS_LB_CTX_FREE
, -1, resp
);
901 req
.rss_cos_lb_ctx_id
= rte_cpu_to_le_16(vnic
->fw_rss_cos_lb_ctx
);
903 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
907 vnic
->fw_rss_cos_lb_ctx
= INVALID_HW_RING_ID
;
912 int bnxt_hwrm_vnic_free(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
915 struct hwrm_vnic_free_input req
= {.req_type
= 0 };
916 struct hwrm_vnic_free_output
*resp
= bp
->hwrm_cmd_resp_addr
;
918 if (vnic
->fw_vnic_id
== INVALID_HW_RING_ID
)
921 HWRM_PREP(req
, VNIC_FREE
, -1, resp
);
923 req
.vnic_id
= rte_cpu_to_le_16(vnic
->fw_vnic_id
);
925 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
929 vnic
->fw_vnic_id
= INVALID_HW_RING_ID
;
933 int bnxt_hwrm_vnic_rss_cfg(struct bnxt
*bp
,
934 struct bnxt_vnic_info
*vnic
)
937 struct hwrm_vnic_rss_cfg_input req
= {.req_type
= 0 };
938 struct hwrm_vnic_rss_cfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
940 HWRM_PREP(req
, VNIC_RSS_CFG
, -1, resp
);
942 req
.hash_type
= rte_cpu_to_le_32(vnic
->hash_type
);
944 req
.ring_grp_tbl_addr
=
945 rte_cpu_to_le_64(vnic
->rss_table_dma_addr
);
946 req
.hash_key_tbl_addr
=
947 rte_cpu_to_le_64(vnic
->rss_hash_key_dma_addr
);
948 req
.rss_ctx_idx
= rte_cpu_to_le_16(vnic
->fw_rss_cos_lb_ctx
);
950 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
/*
 * HWRM utility functions
 */
961 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt
*bp
)
966 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
967 struct bnxt_tx_queue
*txq
;
968 struct bnxt_rx_queue
*rxq
;
969 struct bnxt_cp_ring_info
*cpr
;
971 if (i
>= bp
->rx_cp_nr_rings
) {
972 txq
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
];
975 rxq
= bp
->rx_queues
[i
];
979 rc
= bnxt_hwrm_stat_clear(bp
, cpr
);
986 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt
*bp
)
990 struct bnxt_cp_ring_info
*cpr
;
992 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
993 unsigned int idx
= i
+ 1;
995 if (i
>= bp
->rx_cp_nr_rings
)
996 cpr
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
]->cp_ring
;
998 cpr
= bp
->rx_queues
[i
]->cp_ring
;
999 if (cpr
->hw_stats_ctx_id
!= HWRM_NA_SIGNATURE
) {
1000 rc
= bnxt_hwrm_stat_ctx_free(bp
, cpr
, idx
);
1008 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt
*bp
)
1013 for (i
= 0; i
< bp
->rx_cp_nr_rings
+ bp
->tx_cp_nr_rings
; i
++) {
1014 struct bnxt_tx_queue
*txq
;
1015 struct bnxt_rx_queue
*rxq
;
1016 struct bnxt_cp_ring_info
*cpr
;
1017 unsigned int idx
= i
+ 1;
1019 if (i
>= bp
->rx_cp_nr_rings
) {
1020 txq
= bp
->tx_queues
[i
- bp
->rx_cp_nr_rings
];
1023 rxq
= bp
->rx_queues
[i
];
1027 rc
= bnxt_hwrm_stat_ctx_alloc(bp
, cpr
, idx
);
1035 int bnxt_free_all_hwrm_ring_grps(struct bnxt
*bp
)
1040 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++) {
1041 unsigned int idx
= i
+ 1;
1043 if (bp
->grp_info
[idx
].fw_grp_id
== INVALID_HW_RING_ID
) {
1045 "Attempt to free invalid ring group %d\n",
1050 rc
= bnxt_hwrm_ring_grp_free(bp
, idx
);
1058 static void bnxt_free_cp_ring(struct bnxt
*bp
,
1059 struct bnxt_cp_ring_info
*cpr
, unsigned int idx
)
1061 struct bnxt_ring
*cp_ring
= cpr
->cp_ring_struct
;
1063 bnxt_hwrm_ring_free(bp
, cp_ring
,
1064 HWRM_RING_FREE_INPUT_RING_TYPE_CMPL
);
1065 cp_ring
->fw_ring_id
= INVALID_HW_RING_ID
;
1066 bp
->grp_info
[idx
].cp_fw_ring_id
= INVALID_HW_RING_ID
;
1067 memset(cpr
->cp_desc_ring
, 0, cpr
->cp_ring_struct
->ring_size
*
1068 sizeof(*cpr
->cp_desc_ring
));
1069 cpr
->cp_raw_cons
= 0;
1072 int bnxt_free_all_hwrm_rings(struct bnxt
*bp
)
1077 for (i
= 0; i
< bp
->tx_cp_nr_rings
; i
++) {
1078 struct bnxt_tx_queue
*txq
= bp
->tx_queues
[i
];
1079 struct bnxt_tx_ring_info
*txr
= txq
->tx_ring
;
1080 struct bnxt_ring
*ring
= txr
->tx_ring_struct
;
1081 struct bnxt_cp_ring_info
*cpr
= txq
->cp_ring
;
1082 unsigned int idx
= bp
->rx_cp_nr_rings
+ i
+ 1;
1084 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
1085 bnxt_hwrm_ring_free(bp
, ring
,
1086 HWRM_RING_FREE_INPUT_RING_TYPE_TX
);
1087 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
1088 memset(txr
->tx_desc_ring
, 0,
1089 txr
->tx_ring_struct
->ring_size
*
1090 sizeof(*txr
->tx_desc_ring
));
1091 memset(txr
->tx_buf_ring
, 0,
1092 txr
->tx_ring_struct
->ring_size
*
1093 sizeof(*txr
->tx_buf_ring
));
1097 if (cpr
->cp_ring_struct
->fw_ring_id
!= INVALID_HW_RING_ID
)
1098 bnxt_free_cp_ring(bp
, cpr
, idx
);
1101 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++) {
1102 struct bnxt_rx_queue
*rxq
= bp
->rx_queues
[i
];
1103 struct bnxt_rx_ring_info
*rxr
= rxq
->rx_ring
;
1104 struct bnxt_ring
*ring
= rxr
->rx_ring_struct
;
1105 struct bnxt_cp_ring_info
*cpr
= rxq
->cp_ring
;
1106 unsigned int idx
= i
+ 1;
1108 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
1109 bnxt_hwrm_ring_free(bp
, ring
,
1110 HWRM_RING_FREE_INPUT_RING_TYPE_RX
);
1111 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
1112 bp
->grp_info
[idx
].rx_fw_ring_id
= INVALID_HW_RING_ID
;
1113 memset(rxr
->rx_desc_ring
, 0,
1114 rxr
->rx_ring_struct
->ring_size
*
1115 sizeof(*rxr
->rx_desc_ring
));
1116 memset(rxr
->rx_buf_ring
, 0,
1117 rxr
->rx_ring_struct
->ring_size
*
1118 sizeof(*rxr
->rx_buf_ring
));
1121 if (cpr
->cp_ring_struct
->fw_ring_id
!= INVALID_HW_RING_ID
)
1122 bnxt_free_cp_ring(bp
, cpr
, idx
);
1125 /* Default completion ring */
1127 struct bnxt_cp_ring_info
*cpr
= bp
->def_cp_ring
;
1129 if (cpr
->cp_ring_struct
->fw_ring_id
!= INVALID_HW_RING_ID
)
1130 bnxt_free_cp_ring(bp
, cpr
, 0);
1136 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt
*bp
)
1141 for (i
= 0; i
< bp
->rx_cp_nr_rings
; i
++) {
1142 unsigned int idx
= i
+ 1;
1144 if (bp
->grp_info
[idx
].cp_fw_ring_id
== INVALID_HW_RING_ID
||
1145 bp
->grp_info
[idx
].rx_fw_ring_id
== INVALID_HW_RING_ID
)
1148 rc
= bnxt_hwrm_ring_grp_alloc(bp
, idx
);
1156 void bnxt_free_hwrm_resources(struct bnxt
*bp
)
1158 /* Release memzone */
1159 rte_free(bp
->hwrm_cmd_resp_addr
);
1160 bp
->hwrm_cmd_resp_addr
= NULL
;
1161 bp
->hwrm_cmd_resp_dma_addr
= 0;
1164 int bnxt_alloc_hwrm_resources(struct bnxt
*bp
)
1166 struct rte_pci_device
*pdev
= bp
->pdev
;
1167 char type
[RTE_MEMZONE_NAMESIZE
];
1169 sprintf(type
, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev
->addr
.domain
,
1170 pdev
->addr
.bus
, pdev
->addr
.devid
, pdev
->addr
.function
);
1171 bp
->max_req_len
= HWRM_MAX_REQ_LEN
;
1172 bp
->max_resp_len
= HWRM_MAX_RESP_LEN
;
1173 bp
->hwrm_cmd_resp_addr
= rte_malloc(type
, bp
->max_resp_len
, 0);
1174 if (bp
->hwrm_cmd_resp_addr
== NULL
)
1176 bp
->hwrm_cmd_resp_dma_addr
=
1177 rte_malloc_virt2phy(bp
->hwrm_cmd_resp_addr
);
1178 rte_spinlock_init(&bp
->hwrm_lock
);
1183 int bnxt_clear_hwrm_vnic_filters(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
1185 struct bnxt_filter_info
*filter
;
1188 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
1189 rc
= bnxt_hwrm_clear_filter(bp
, filter
);
1196 int bnxt_set_hwrm_vnic_filters(struct bnxt
*bp
, struct bnxt_vnic_info
*vnic
)
1198 struct bnxt_filter_info
*filter
;
1201 STAILQ_FOREACH(filter
, &vnic
->filter
, next
) {
1202 rc
= bnxt_hwrm_set_filter(bp
, vnic
, filter
);
1209 void bnxt_free_all_hwrm_resources(struct bnxt
*bp
)
1211 struct bnxt_vnic_info
*vnic
;
1214 if (bp
->vnic_info
== NULL
)
1217 vnic
= &bp
->vnic_info
[0];
1218 bnxt_hwrm_cfa_l2_clear_rx_mask(bp
, vnic
);
1220 /* VNIC resources */
1221 for (i
= 0; i
< bp
->nr_vnics
; i
++) {
1222 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[i
];
1224 bnxt_clear_hwrm_vnic_filters(bp
, vnic
);
1226 bnxt_hwrm_vnic_ctx_free(bp
, vnic
);
1227 bnxt_hwrm_vnic_free(bp
, vnic
);
1229 /* Ring resources */
1230 bnxt_free_all_hwrm_rings(bp
);
1231 bnxt_free_all_hwrm_ring_grps(bp
);
1232 bnxt_free_all_hwrm_stat_ctxs(bp
);
1235 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed
)
1237 uint8_t hw_link_duplex
= HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
;
1239 if ((conf_link_speed
& ETH_LINK_SPEED_FIXED
) == ETH_LINK_SPEED_AUTONEG
)
1240 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
;
1242 switch (conf_link_speed
) {
1243 case ETH_LINK_SPEED_10M_HD
:
1244 case ETH_LINK_SPEED_100M_HD
:
1245 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF
;
1247 return hw_link_duplex
;
1250 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed
)
1252 uint16_t eth_link_speed
= 0;
1254 if (conf_link_speed
== ETH_LINK_SPEED_AUTONEG
)
1255 return ETH_LINK_SPEED_AUTONEG
;
1257 switch (conf_link_speed
& ~ETH_LINK_SPEED_FIXED
) {
1258 case ETH_LINK_SPEED_100M
:
1259 case ETH_LINK_SPEED_100M_HD
:
1261 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB
;
1263 case ETH_LINK_SPEED_1G
:
1265 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB
;
1267 case ETH_LINK_SPEED_2_5G
:
1269 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB
;
1271 case ETH_LINK_SPEED_10G
:
1273 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB
;
1275 case ETH_LINK_SPEED_20G
:
1277 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB
;
1279 case ETH_LINK_SPEED_25G
:
1281 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB
;
1283 case ETH_LINK_SPEED_40G
:
1285 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB
;
1287 case ETH_LINK_SPEED_50G
:
1289 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB
;
1293 "Unsupported link speed %d; default to AUTO\n",
1297 return eth_link_speed
;
1300 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1301 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1302 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1303 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1305 static int bnxt_valid_link_speed(uint32_t link_speed
, uint8_t port_id
)
1309 if (link_speed
== ETH_LINK_SPEED_AUTONEG
)
1312 if (link_speed
& ETH_LINK_SPEED_FIXED
) {
1313 one_speed
= link_speed
& ~ETH_LINK_SPEED_FIXED
;
1315 if (one_speed
& (one_speed
- 1)) {
1317 "Invalid advertised speeds (%u) for port %u\n",
1318 link_speed
, port_id
);
1321 if ((one_speed
& BNXT_SUPPORTED_SPEEDS
) != one_speed
) {
1323 "Unsupported advertised speed (%u) for port %u\n",
1324 link_speed
, port_id
);
1328 if (!(link_speed
& BNXT_SUPPORTED_SPEEDS
)) {
1330 "Unsupported advertised speeds (%u) for port %u\n",
1331 link_speed
, port_id
);
1338 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed
)
1342 if (link_speed
== ETH_LINK_SPEED_AUTONEG
)
1343 link_speed
= BNXT_SUPPORTED_SPEEDS
;
1345 if (link_speed
& ETH_LINK_SPEED_100M
)
1346 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB
;
1347 if (link_speed
& ETH_LINK_SPEED_100M_HD
)
1348 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB
;
1349 if (link_speed
& ETH_LINK_SPEED_1G
)
1350 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB
;
1351 if (link_speed
& ETH_LINK_SPEED_2_5G
)
1352 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB
;
1353 if (link_speed
& ETH_LINK_SPEED_10G
)
1354 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB
;
1355 if (link_speed
& ETH_LINK_SPEED_20G
)
1356 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB
;
1357 if (link_speed
& ETH_LINK_SPEED_25G
)
1358 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB
;
1359 if (link_speed
& ETH_LINK_SPEED_40G
)
1360 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB
;
1361 if (link_speed
& ETH_LINK_SPEED_50G
)
1362 ret
|= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB
;
1366 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed
)
1368 uint32_t eth_link_speed
= ETH_SPEED_NUM_NONE
;
1370 switch (hw_link_speed
) {
1371 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB
:
1372 eth_link_speed
= ETH_SPEED_NUM_100M
;
1374 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB
:
1375 eth_link_speed
= ETH_SPEED_NUM_1G
;
1377 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB
:
1378 eth_link_speed
= ETH_SPEED_NUM_2_5G
;
1380 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB
:
1381 eth_link_speed
= ETH_SPEED_NUM_10G
;
1383 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB
:
1384 eth_link_speed
= ETH_SPEED_NUM_20G
;
1386 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB
:
1387 eth_link_speed
= ETH_SPEED_NUM_25G
;
1389 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB
:
1390 eth_link_speed
= ETH_SPEED_NUM_40G
;
1392 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB
:
1393 eth_link_speed
= ETH_SPEED_NUM_50G
;
1395 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB
:
1397 RTE_LOG(ERR
, PMD
, "HWRM link speed %d not defined\n",
1401 return eth_link_speed
;
1404 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex
)
1406 uint16_t eth_link_duplex
= ETH_LINK_FULL_DUPLEX
;
1408 switch (hw_link_duplex
) {
1409 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
:
1410 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL
:
1411 eth_link_duplex
= ETH_LINK_FULL_DUPLEX
;
1413 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF
:
1414 eth_link_duplex
= ETH_LINK_HALF_DUPLEX
;
1417 RTE_LOG(ERR
, PMD
, "HWRM link duplex %d not defined\n",
1421 return eth_link_duplex
;
1424 int bnxt_get_hwrm_link_config(struct bnxt
*bp
, struct rte_eth_link
*link
)
1427 struct bnxt_link_info
*link_info
= &bp
->link_info
;
1429 rc
= bnxt_hwrm_port_phy_qcfg(bp
, link_info
);
1432 "Get link config failed with rc %d\n", rc
);
1435 if (link_info
->link_up
)
1437 bnxt_parse_hw_link_speed(link_info
->link_speed
);
1439 link
->link_speed
= ETH_LINK_SPEED_10M
;
1440 link
->link_duplex
= bnxt_parse_hw_link_duplex(link_info
->duplex
);
1441 link
->link_status
= link_info
->link_up
;
1442 link
->link_autoneg
= link_info
->auto_mode
==
1443 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE
?
1444 ETH_LINK_SPEED_FIXED
: ETH_LINK_SPEED_AUTONEG
;
1449 int bnxt_set_hwrm_link_config(struct bnxt
*bp
, bool link_up
)
1452 struct rte_eth_conf
*dev_conf
= &bp
->eth_dev
->data
->dev_conf
;
1453 struct bnxt_link_info link_req
;
1456 if (BNXT_NPAR_PF(bp
) || BNXT_VF(bp
))
1459 rc
= bnxt_valid_link_speed(dev_conf
->link_speeds
,
1460 bp
->eth_dev
->data
->port_id
);
1464 memset(&link_req
, 0, sizeof(link_req
));
1465 link_req
.link_up
= link_up
;
1469 speed
= bnxt_parse_eth_link_speed(dev_conf
->link_speeds
);
1470 link_req
.phy_flags
= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY
;
1472 link_req
.phy_flags
|=
1473 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG
;
1474 link_req
.auto_mode
=
1475 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
;
1476 link_req
.auto_link_speed_mask
=
1477 bnxt_parse_eth_link_speed_mask(dev_conf
->link_speeds
);
1479 link_req
.phy_flags
|= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE
;
1480 link_req
.link_speed
= speed
;
1481 RTE_LOG(INFO
, PMD
, "Set Link Speed %x\n", speed
);
1483 link_req
.duplex
= bnxt_parse_eth_link_duplex(dev_conf
->link_speeds
);
1484 link_req
.auto_pause
= bp
->link_info
.auto_pause
;
1485 link_req
.force_pause
= bp
->link_info
.force_pause
;
1488 rc
= bnxt_hwrm_port_phy_cfg(bp
, &link_req
);
1491 "Set link config failed with rc %d\n", rc
);
1494 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL
);
1500 int bnxt_hwrm_func_qcfg(struct bnxt
*bp
)
1502 struct hwrm_func_qcfg_input req
= {0};
1503 struct hwrm_func_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
1506 HWRM_PREP(req
, FUNC_QCFG
, -1, resp
);
1507 req
.fid
= rte_cpu_to_le_16(0xffff);
1509 rc
= bnxt_hwrm_send_message(bp
, &req
, sizeof(req
));
1514 struct bnxt_vf_info
*vf
= &bp
->vf
;
1516 /* Hard Coded.. 0xfff VLAN ID mask */
1517 vf
->vlan
= rte_le_to_cpu_16(resp
->vlan
) & 0xfff;
1520 switch (resp
->port_partition_type
) {
1521 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0
:
1522 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5
:
1523 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0
:
1524 bp
->port_partition_type
= resp
->port_partition_type
;
1527 bp
->port_partition_type
= 0;