/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 */

#define BNX2X_DRIVER_VERSION "1.78.18"
#include "bnx2x_vfpf.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "rte_version.h"

#include <sys/types.h>
#include <rte_string_fns.h>
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
#define BNX2X_PMD_VERSION_REVISION 7
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
bnx2x_pmd_version(void)
{
	static char version[32];

	snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d",
		 BNX2X_PMD_VER_PREFIX,
		 BNX2X_DRIVER_VERSION,
		 BNX2X_PMD_VERSION_MAJOR,
		 BNX2X_PMD_VERSION_MINOR,
		 BNX2X_PMD_VERSION_REVISION,
		 BNX2X_PMD_VERSION_PATCH);

	return version;
}
static z_stream zlib_stream;

#define EVL_VLID_MASK 0x0FFF

#define BNX2X_DEF_SB_ATT_IDX 0x0001
#define BNX2X_DEF_SB_IDX     0x0002

/*
 * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000	/* 10 msecs */
#define FLR_WAIT_INTERVAL 50	/* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL)	/* 200 */
struct pbf_pN_buf_regs {

struct pbf_pN_cmd_regs {

/* resources needed for unloading a previously loaded device */

#define BNX2X_PREV_WAIT_NEEDED 1
rte_spinlock_t bnx2x_prev_mtx;

struct bnx2x_prev_list_node {
	LIST_ENTRY(bnx2x_prev_list_node) node;

static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list
	= LIST_HEAD_INITIALIZER(bnx2x_prev_list);

static int load_count[2][3] = { { 0 } };
	/* per-path: 0-common, 1-port0, 2-port1 */
static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg,
static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc);
static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng,
static void bnx2x_set_reset_global(struct bnx2x_softc *sc);
static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc);
static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine);
static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc);
static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t *global,
static void bnx2x_int_disable(struct bnx2x_softc *sc);
static int bnx2x_release_leader_lock(struct bnx2x_softc *sc);
static void bnx2x_pf_disable(struct bnx2x_softc *sc);
static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
				 struct bnx2x_fastpath *fp,
				 uint16_t rx_bd_prod, uint16_t rx_cq_prod);
static void bnx2x_link_report_locked(struct bnx2x_softc *sc);
static void bnx2x_link_report(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
static void bnx2x_free_mem(struct bnx2x_softc *sc);
static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc);

static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
			 uint8_t storm, uint16_t index, uint8_t op,
int bnx2x_test_bit(int nr, volatile unsigned long *addr)
{
	int res;

	res = ((*addr) & (1UL << nr)) != 0;

	return res;
}

void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr)
{
	__sync_fetch_and_or(addr, (1UL << nr));
}

void bnx2x_clear_bit(int nr, volatile unsigned long *addr)
{
	__sync_fetch_and_and(addr, ~(1UL << nr));
}

int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << nr);

	return __sync_fetch_and_and(addr, ~mask) & mask;
}

int bnx2x_cmpxchg(volatile int *addr, int old, int new)
{
	return __sync_val_compare_and_swap(addr, old, new);
}
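/*
 * Illustrative sketch, not part of the original driver: the helpers above are
 * thin wrappers around the GCC/Clang __sync atomic builtins, so a caller that
 * owns a plain unsigned long flag word can use them like this:
 *
 *	unsigned long ramrod_flags = 0;
 *	int was_set;
 *
 *	bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	if (bnx2x_test_bit(RAMROD_COMP_WAIT, &ramrod_flags))
 *		bnx2x_clear_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *
 *	// atomically returns the previous value of the bit and clears it
 *	was_set = bnx2x_test_and_clear_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *
 * RAMROD_COMP_WAIT is one of the ecore ramrod flag bits that later code in
 * this file passes to these helpers; any other bit index works the same way.
 */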
int
bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
		const char *msg, uint32_t align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *z;

	snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
		 rte_get_timer_cycles());

	snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
		 rte_get_timer_cycles());

	/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
	z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size,
					RTE_MEMZONE_IOVA_CONTIG, align);

	PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg);

	dma->paddr = (uint64_t)z->iova;
	dma->vaddr = z->addr;
	dma->mzone = (const void *)z;

	PMD_DRV_LOG(DEBUG, sc,
		    "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
void bnx2x_dma_free(struct bnx2x_dma *dma)
{
	if (dma->mzone == NULL)
		return;

	rte_memzone_free((const struct rte_memzone *)dma->mzone);
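/*
 * Illustrative sketch of how the pair above is meant to be used; it assumes
 * (as the callers elsewhere in the driver do) that bnx2x_dma_alloc() returns
 * 0 on success, and the "scratch" label is just a made-up example name:
 *
 *	struct bnx2x_dma scratch;
 *
 *	if (bnx2x_dma_alloc(sc, 4096, &scratch, "scratch", RTE_CACHE_LINE_SIZE) != 0)
 *		return -ENOMEM;
 *	...
 *	bnx2x_dma_free(&scratch);
 *
 * The memzone name built above embeds the absolute function number (or the
 * PCIe device id for a VF) plus a timer-cycle stamp, so repeated allocations
 * get unique names.
 */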
static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
{
	uint32_t lock_status;
	uint32_t resource_bit = (1 << resource);
	int func = SC_FUNC(sc);
	uint32_t hw_lock_control_reg;

#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
	PMD_INIT_FUNC_TRACE(sc);

	PMD_INIT_FUNC_TRACE(sc);

	/* validate the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		PMD_DRV_LOG(NOTICE, sc,
			    "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",

	hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));

	hw_lock_control_reg =
		(MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

	/* validate the resource is not already taken */
	lock_status = REG_RD(sc, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		PMD_DRV_LOG(NOTICE, sc,
			    "resource in use (status 0x%x bit 0x%x)",
			    lock_status, resource_bit);

	/* try every 5ms for 5 seconds */
	for (cnt = 0; cnt < 1000; cnt++) {
		REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
		lock_status = REG_RD(sc, hw_lock_control_reg);
		if (lock_status & resource_bit) {

	PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!",
		    resource, resource_bit);
static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
{
	uint32_t lock_status;
	uint32_t resource_bit = (1 << resource);
	int func = SC_FUNC(sc);
	uint32_t hw_lock_control_reg;

#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
	PMD_INIT_FUNC_TRACE(sc);

	PMD_INIT_FUNC_TRACE(sc);

	/* validate the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		PMD_DRV_LOG(NOTICE, sc,
			    "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
			    " resource_bit 0x%x", resource, resource_bit);

	hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));

	hw_lock_control_reg =
		(MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));

	/* validate the resource is currently taken */
	lock_status = REG_RD(sc, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		PMD_DRV_LOG(NOTICE, sc,
			    "resource not in use (status 0x%x bit 0x%x)",
			    lock_status, resource_bit);

	REG_WR(sc, hw_lock_control_reg, resource_bit);
static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
{
	bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);

static void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
{
	bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
	BNX2X_PHY_UNLOCK(sc);
}
/* copy command into DMAE command memory and set DMAE command Go */
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
{
	cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
	for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
		REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i));

	REG_WR(sc, dmae_reg_go_c[idx], 1);
uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_COMMAND_C_TYPE_ENABLE);
}

uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
{
	return opcode & ~DMAE_COMMAND_SRC_RESET;
}
bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type, uint8_t dst_type,
		  uint8_t with_comp, uint8_t comp_type)
{
	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

	opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

	opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
		   (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;

	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;

	opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae,
			  uint8_t src_type, uint8_t dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type,
					 TRUE, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp));
	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
/* issue a DMAE command over the init channel and wait for completion */
bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
{
	uint32_t *wb_comp = BNX2X_SP(sc, wb_comp);
	int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

	/* reset completion */

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));

	/* wait for completion */

	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		    (sc->recovery_state != BNX2X_RECOVERY_DONE &&
		     sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			PMD_DRV_LOG(INFO, sc, "DMAE timeout!");

	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		PMD_DRV_LOG(INFO, sc, "DMAE PCI error!");
		return DMAE_PCI_ERROR;
425 void bnx2x_read_dmae(struct bnx2x_softc
*sc
, uint32_t src_addr
, uint32_t len32
)
427 struct dmae_command dmae
;
432 if (!sc
->dmae_ready
) {
433 data
= BNX2X_SP(sc
, wb_data
[0]);
435 for (i
= 0; i
< len32
; i
++) {
436 data
[i
] = REG_RD(sc
, (src_addr
+ (i
* 4)));
442 /* set opcode and fixed command fields */
443 bnx2x_prep_dmae_with_comp(sc
, &dmae
, DMAE_SRC_GRC
, DMAE_DST_PCI
);
445 /* fill in addresses and len */
446 dmae
.src_addr_lo
= (src_addr
>> 2); /* GRC addr has dword resolution */
447 dmae
.src_addr_hi
= 0;
448 dmae
.dst_addr_lo
= U64_LO(BNX2X_SP_MAPPING(sc
, wb_data
));
449 dmae
.dst_addr_hi
= U64_HI(BNX2X_SP_MAPPING(sc
, wb_data
));
452 /* issue the command and wait for completion */
453 if ((rc
= bnx2x_issue_dmae_with_comp(sc
, &dmae
)) != 0) {
454 rte_panic("DMAE failed (%d)", rc
);
459 bnx2x_write_dmae(struct bnx2x_softc
*sc
, rte_iova_t dma_addr
, uint32_t dst_addr
,
462 struct dmae_command dmae
;
465 if (!sc
->dmae_ready
) {
466 ecore_init_str_wr(sc
, dst_addr
, BNX2X_SP(sc
, wb_data
[0]), len32
);
470 /* set opcode and fixed command fields */
471 bnx2x_prep_dmae_with_comp(sc
, &dmae
, DMAE_SRC_PCI
, DMAE_DST_GRC
);
473 /* fill in addresses and len */
474 dmae
.src_addr_lo
= U64_LO(dma_addr
);
475 dmae
.src_addr_hi
= U64_HI(dma_addr
);
476 dmae
.dst_addr_lo
= (dst_addr
>> 2); /* GRC addr has dword resolution */
477 dmae
.dst_addr_hi
= 0;
480 /* issue the command and wait for completion */
481 if ((rc
= bnx2x_issue_dmae_with_comp(sc
, &dmae
)) != 0) {
482 rte_panic("DMAE failed (%d)", rc
);
487 bnx2x_write_dmae_phys_len(struct bnx2x_softc
*sc
, rte_iova_t phys_addr
,
488 uint32_t addr
, uint32_t len
)
490 uint32_t dmae_wr_max
= DMAE_LEN32_WR_MAX(sc
);
493 while (len
> dmae_wr_max
) {
494 bnx2x_write_dmae(sc
, (phys_addr
+ offset
), /* src DMA address */
495 (addr
+ offset
), /* dst GRC address */
497 offset
+= (dmae_wr_max
* 4);
501 bnx2x_write_dmae(sc
, (phys_addr
+ offset
), /* src DMA address */
502 (addr
+ offset
), /* dst GRC address */
507 bnx2x_set_ctx_validation(struct bnx2x_softc
*sc
, struct eth_context
*cxt
,
510 /* ustorm cxt validation */
511 cxt
->ustorm_ag_context
.cdu_usage
=
512 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc
, cid
),
513 CDU_REGION_NUMBER_UCM_AG
,
514 ETH_CONNECTION_TYPE
);
515 /* xcontext validation */
516 cxt
->xstorm_ag_context
.cdu_reserved
=
517 CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc
, cid
),
518 CDU_REGION_NUMBER_XCM_AG
,
519 ETH_CONNECTION_TYPE
);
523 bnx2x_storm_memset_hc_timeout(struct bnx2x_softc
*sc
, uint8_t fw_sb_id
,
524 uint8_t sb_index
, uint8_t ticks
)
527 (BAR_CSTRORM_INTMEM
+
528 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id
, sb_index
));
530 REG_WR8(sc
, addr
, ticks
);
534 bnx2x_storm_memset_hc_disable(struct bnx2x_softc
*sc
, uint16_t fw_sb_id
,
535 uint8_t sb_index
, uint8_t disable
)
537 uint32_t enable_flag
=
538 (disable
) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT
);
540 (BAR_CSTRORM_INTMEM
+
541 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id
, sb_index
));
545 flags
= REG_RD8(sc
, addr
);
546 flags
&= ~HC_INDEX_DATA_HC_ENABLED
;
547 flags
|= enable_flag
;
548 REG_WR8(sc
, addr
, flags
);
552 bnx2x_update_coalesce_sb_index(struct bnx2x_softc
*sc
, uint8_t fw_sb_id
,
553 uint8_t sb_index
, uint8_t disable
, uint16_t usec
)
555 uint8_t ticks
= (usec
/ 4);
557 bnx2x_storm_memset_hc_timeout(sc
, fw_sb_id
, sb_index
, ticks
);
559 disable
= (disable
) ? 1 : ((usec
) ? 0 : 1);
560 bnx2x_storm_memset_hc_disable(sc
, fw_sb_id
, sb_index
, disable
);
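/*
 * Worked example for the helper above (illustrative only): a requested
 * coalescing interval of usec = 100 is programmed as ticks = 100 / 4 = 25
 * firmware ticks and the status-block index stays enabled (disable = 0).
 * Requesting usec = 0 with disable = 0 still ends up disabling the index,
 * because of the "(disable) ? 1 : ((usec) ? 0 : 1)" fold above.
 */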
563 uint32_t elink_cb_reg_read(struct bnx2x_softc
*sc
, uint32_t reg_addr
)
565 return REG_RD(sc
, reg_addr
);
568 void elink_cb_reg_write(struct bnx2x_softc
*sc
, uint32_t reg_addr
, uint32_t val
)
570 REG_WR(sc
, reg_addr
, val
);
574 elink_cb_event_log(__rte_unused
struct bnx2x_softc
*sc
,
575 __rte_unused
const elink_log_id_t elink_log_id
, ...)
577 PMD_DRV_LOG(DEBUG
, sc
, "ELINK EVENT LOG (%d)", elink_log_id
);
580 static int bnx2x_set_spio(struct bnx2x_softc
*sc
, int spio
, uint32_t mode
)
584 /* Only 2 SPIOs are configurable */
585 if ((spio
!= MISC_SPIO_SPIO4
) && (spio
!= MISC_SPIO_SPIO5
)) {
586 PMD_DRV_LOG(NOTICE
, sc
, "Invalid SPIO 0x%x", spio
);
590 bnx2x_acquire_hw_lock(sc
, HW_LOCK_RESOURCE_SPIO
);
592 /* read SPIO and mask except the float bits */
593 spio_reg
= (REG_RD(sc
, MISC_REG_SPIO
) & MISC_SPIO_FLOAT
);
596 case MISC_SPIO_OUTPUT_LOW
:
597 /* clear FLOAT and set CLR */
598 spio_reg
&= ~(spio
<< MISC_SPIO_FLOAT_POS
);
599 spio_reg
|= (spio
<< MISC_SPIO_CLR_POS
);
602 case MISC_SPIO_OUTPUT_HIGH
:
603 /* clear FLOAT and set SET */
604 spio_reg
&= ~(spio
<< MISC_SPIO_FLOAT_POS
);
605 spio_reg
|= (spio
<< MISC_SPIO_SET_POS
);
608 case MISC_SPIO_INPUT_HI_Z
:
610 spio_reg
|= (spio
<< MISC_SPIO_FLOAT_POS
);
617 REG_WR(sc
, MISC_REG_SPIO
, spio_reg
);
618 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_SPIO
);
623 static int bnx2x_gpio_read(struct bnx2x_softc
*sc
, int gpio_num
, uint8_t port
)
625 /* The GPIO should be swapped if swap register is set and active */
626 int gpio_port
= ((REG_RD(sc
, NIG_REG_PORT_SWAP
) &&
627 REG_RD(sc
, NIG_REG_STRAP_OVERRIDE
)) ^ port
);
628 int gpio_shift
= gpio_num
;
630 gpio_shift
+= MISC_REGISTERS_GPIO_PORT_SHIFT
;
632 uint32_t gpio_mask
= (1 << gpio_shift
);
635 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
636 PMD_DRV_LOG(NOTICE
, sc
, "Invalid GPIO %d", gpio_num
);
640 /* read GPIO value */
641 gpio_reg
= REG_RD(sc
, MISC_REG_GPIO
);
643 /* get the requested pin value */
644 return ((gpio_reg
& gpio_mask
) == gpio_mask
) ? 1 : 0;
648 bnx2x_gpio_write(struct bnx2x_softc
*sc
, int gpio_num
, uint32_t mode
, uint8_t port
)
650 /* The GPIO should be swapped if swap register is set and active */
651 int gpio_port
= ((REG_RD(sc
, NIG_REG_PORT_SWAP
) &&
652 REG_RD(sc
, NIG_REG_STRAP_OVERRIDE
)) ^ port
);
653 int gpio_shift
= gpio_num
;
655 gpio_shift
+= MISC_REGISTERS_GPIO_PORT_SHIFT
;
657 uint32_t gpio_mask
= (1 << gpio_shift
);
660 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
661 PMD_DRV_LOG(NOTICE
, sc
, "Invalid GPIO %d", gpio_num
);
665 bnx2x_acquire_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
667 /* read GPIO and mask except the float bits */
668 gpio_reg
= (REG_RD(sc
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
671 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
672 /* clear FLOAT and set CLR */
673 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
674 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
677 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
678 /* clear FLOAT and set SET */
679 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
680 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
683 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
685 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
692 REG_WR(sc
, MISC_REG_GPIO
, gpio_reg
);
693 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
699 bnx2x_gpio_mult_write(struct bnx2x_softc
*sc
, uint8_t pins
, uint32_t mode
)
703 /* any port swapping should be handled by caller */
705 bnx2x_acquire_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
707 /* read GPIO and mask except the float bits */
708 gpio_reg
= REG_RD(sc
, MISC_REG_GPIO
);
709 gpio_reg
&= ~(pins
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
710 gpio_reg
&= ~(pins
<< MISC_REGISTERS_GPIO_CLR_POS
);
711 gpio_reg
&= ~(pins
<< MISC_REGISTERS_GPIO_SET_POS
);
714 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
716 gpio_reg
|= (pins
<< MISC_REGISTERS_GPIO_CLR_POS
);
719 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
721 gpio_reg
|= (pins
<< MISC_REGISTERS_GPIO_SET_POS
);
724 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
726 gpio_reg
|= (pins
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
730 PMD_DRV_LOG(NOTICE
, sc
,
731 "Invalid GPIO mode assignment %d", mode
);
732 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
736 REG_WR(sc
, MISC_REG_GPIO
, gpio_reg
);
737 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
743 bnx2x_gpio_int_write(struct bnx2x_softc
*sc
, int gpio_num
, uint32_t mode
,
746 /* The GPIO should be swapped if swap register is set and active */
747 int gpio_port
= ((REG_RD(sc
, NIG_REG_PORT_SWAP
) &&
748 REG_RD(sc
, NIG_REG_STRAP_OVERRIDE
)) ^ port
);
749 int gpio_shift
= gpio_num
;
751 gpio_shift
+= MISC_REGISTERS_GPIO_PORT_SHIFT
;
753 uint32_t gpio_mask
= (1 << gpio_shift
);
756 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
757 PMD_DRV_LOG(NOTICE
, sc
, "Invalid GPIO %d", gpio_num
);
761 bnx2x_acquire_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
764 gpio_reg
= REG_RD(sc
, MISC_REG_GPIO_INT
);
767 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR
:
768 /* clear SET and set CLR */
769 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
770 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
773 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET
:
774 /* clear CLR and set SET */
775 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_INT_CLR_POS
);
776 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_INT_SET_POS
);
783 REG_WR(sc
, MISC_REG_GPIO_INT
, gpio_reg
);
784 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_GPIO
);
790 elink_cb_gpio_read(struct bnx2x_softc
* sc
, uint16_t gpio_num
, uint8_t port
)
792 return bnx2x_gpio_read(sc
, gpio_num
, port
);
795 uint8_t elink_cb_gpio_write(struct bnx2x_softc
* sc
, uint16_t gpio_num
, uint8_t mode
, /* 0=low 1=high */
798 return bnx2x_gpio_write(sc
, gpio_num
, mode
, port
);
802 elink_cb_gpio_mult_write(struct bnx2x_softc
* sc
, uint8_t pins
,
803 uint8_t mode
/* 0=low 1=high */ )
805 return bnx2x_gpio_mult_write(sc
, pins
, mode
);
808 uint8_t elink_cb_gpio_int_write(struct bnx2x_softc
* sc
, uint16_t gpio_num
, uint8_t mode
, /* 0=low 1=high */
811 return bnx2x_gpio_int_write(sc
, gpio_num
, mode
, port
);
814 void elink_cb_notify_link_changed(struct bnx2x_softc
*sc
)
816 REG_WR(sc
, (MISC_REG_AEU_GENERAL_ATTN_12
+
817 (SC_FUNC(sc
) * sizeof(uint32_t))), 1);
820 /* send the MCP a request, block until there is a reply */
822 elink_cb_fw_command(struct bnx2x_softc
*sc
, uint32_t command
, uint32_t param
)
824 int mb_idx
= SC_FW_MB_IDX(sc
);
828 uint8_t delay
= CHIP_REV_IS_SLOW(sc
) ? 100 : 10;
831 SHMEM_WR(sc
, func_mb
[mb_idx
].drv_mb_param
, param
);
832 SHMEM_WR(sc
, func_mb
[mb_idx
].drv_mb_header
, (command
| seq
));
834 PMD_DRV_LOG(DEBUG
, sc
,
835 "wrote command 0x%08x to FW MB param 0x%08x",
836 (command
| seq
), param
);
	/* Let the FW do its magic. Give it up to 5 seconds... */
841 rc
= SHMEM_RD(sc
, func_mb
[mb_idx
].fw_mb_header
);
842 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 500));
844 /* is this a reply to our command? */
845 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
)) {
846 rc
&= FW_MSG_CODE_MASK
;
849 PMD_DRV_LOG(NOTICE
, sc
, "FW failed to respond!");
857 bnx2x_fw_command(struct bnx2x_softc
*sc
, uint32_t command
, uint32_t param
)
859 return elink_cb_fw_command(sc
, command
, param
);
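/*
 * Illustrative sketch, not taken from this file: the load/unload paths use
 * this wrapper to run the mailbox handshake shown above, along the lines of
 *
 *	uint32_t load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 0);
 *
 * elink_cb_fw_command() writes the parameter and the sequenced command into
 * the SHMEM function mailbox, then polls fw_mb_header until the firmware
 * echoes the same sequence number (or the roughly five second budget runs
 * out), and returns the firmware's reply code masked by FW_MSG_CODE_MASK.
 */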
863 __storm_memset_dma_mapping(struct bnx2x_softc
*sc
, uint32_t addr
,
866 REG_WR(sc
, addr
, U64_LO(mapping
));
867 REG_WR(sc
, (addr
+ 4), U64_HI(mapping
));
871 storm_memset_spq_addr(struct bnx2x_softc
*sc
, rte_iova_t mapping
,
874 uint32_t addr
= (XSEM_REG_FAST_MEMORY
+
875 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid
));
876 __storm_memset_dma_mapping(sc
, addr
, mapping
);
880 storm_memset_vf_to_pf(struct bnx2x_softc
*sc
, uint16_t abs_fid
, uint16_t pf_id
)
882 REG_WR8(sc
, (BAR_XSTRORM_INTMEM
+ XSTORM_VF_TO_PF_OFFSET(abs_fid
)),
884 REG_WR8(sc
, (BAR_CSTRORM_INTMEM
+ CSTORM_VF_TO_PF_OFFSET(abs_fid
)),
886 REG_WR8(sc
, (BAR_TSTRORM_INTMEM
+ TSTORM_VF_TO_PF_OFFSET(abs_fid
)),
888 REG_WR8(sc
, (BAR_USTRORM_INTMEM
+ USTORM_VF_TO_PF_OFFSET(abs_fid
)),
893 storm_memset_func_en(struct bnx2x_softc
*sc
, uint16_t abs_fid
, uint8_t enable
)
895 REG_WR8(sc
, (BAR_XSTRORM_INTMEM
+ XSTORM_FUNC_EN_OFFSET(abs_fid
)),
897 REG_WR8(sc
, (BAR_CSTRORM_INTMEM
+ CSTORM_FUNC_EN_OFFSET(abs_fid
)),
899 REG_WR8(sc
, (BAR_TSTRORM_INTMEM
+ TSTORM_FUNC_EN_OFFSET(abs_fid
)),
901 REG_WR8(sc
, (BAR_USTRORM_INTMEM
+ USTORM_FUNC_EN_OFFSET(abs_fid
)),
906 storm_memset_eq_data(struct bnx2x_softc
*sc
, struct event_ring_data
*eq_data
,
912 addr
= (BAR_CSTRORM_INTMEM
+ CSTORM_EVENT_RING_DATA_OFFSET(pfid
));
913 size
= sizeof(struct event_ring_data
);
914 ecore_storm_memset_struct(sc
, addr
, size
, (uint32_t *) eq_data
);
918 storm_memset_eq_prod(struct bnx2x_softc
*sc
, uint16_t eq_prod
, uint16_t pfid
)
920 uint32_t addr
= (BAR_CSTRORM_INTMEM
+
921 CSTORM_EVENT_RING_PROD_OFFSET(pfid
));
922 REG_WR16(sc
, addr
, eq_prod
);
/*
 * Post a slowpath command.
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 *   Used to setup the leading connection on a port. Completes on the
 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 *   Used to setup an additional connection on a port. Completes on the
 *   RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 *   Used to force the storm processors to update the statistics database
 *   in host memory. This ramrod is sent on the leading connection CID and
 *   completes as an index increment of the CSTORM on the default status
 *   block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 *   Used to update the state of the leading connection, usually to update
 *   the RSS indirection table. Completes on the RCQ of the leading
 *   connection. (Not currently used under FreeBSD until OS support becomes
 *   available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the multi-queue/RSS connection being torn down. Don't
 *   use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 *   Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 *   the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the leading connection (since the current connection
 *   has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 *   Used to tear down the leading connection prior to driver unload,
 *   typically fp[0]. Completes as an index increment of the CSTORM on the
 *   default status block.
 *
 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
 *   Used for connection offload. Completes on the RCQ of the multi-queue
 *   RSS connection that is being offloaded. (Not currently used under
 *   FreeBSD.)
 *
 * There can only be one command pending per function.
 *
 * Returns:
 *   0 = Success, !0 = Failure.
 */
985 /* must be called under the spq lock */
986 static inline struct eth_spe
*bnx2x_sp_get_next(struct bnx2x_softc
*sc
)
988 struct eth_spe
*next_spe
= sc
->spq_prod_bd
;
990 if (sc
->spq_prod_bd
== sc
->spq_last_bd
) {
991 /* wrap back to the first eth_spq */
992 sc
->spq_prod_bd
= sc
->spq
;
993 sc
->spq_prod_idx
= 0;
1002 /* must be called under the spq lock */
1003 static void bnx2x_sp_prod_update(struct bnx2x_softc
*sc
)
1005 int func
= SC_FUNC(sc
);
1008 * Make sure that BD data is updated before writing the producer.
1009 * BD data is written to the memory, the producer is read from the
1010 * memory, thus we need a full memory barrier to ensure the ordering.
1014 REG_WR16(sc
, (BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
)),
/*
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:      command to check
 * @cmd_type: command type
 */
1026 static int bnx2x_is_contextless_ramrod(int cmd
, int cmd_type
)
1028 if ((cmd_type
== NONE_CONNECTION_TYPE
) ||
1029 (cmd
== RAMROD_CMD_ID_ETH_FORWARD_SETUP
) ||
1030 (cmd
== RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES
) ||
1031 (cmd
== RAMROD_CMD_ID_ETH_FILTER_RULES
) ||
1032 (cmd
== RAMROD_CMD_ID_ETH_MULTICAST_RULES
) ||
1033 (cmd
== RAMROD_CMD_ID_ETH_SET_MAC
) ||
1034 (cmd
== RAMROD_CMD_ID_ETH_RSS_UPDATE
)) {
/*
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @sc:       driver handle
 * @command:  command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:      SW CID the command is related to
 * @data_hi:  command private data address (high 32 bits)
 * @data_lo:  command private data address (low 32 bits)
 * @cmd_type: command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two uint32 fields.
 */
1056 bnx2x_sp_post(struct bnx2x_softc
*sc
, int command
, int cid
, uint32_t data_hi
,
1057 uint32_t data_lo
, int cmd_type
)
1059 struct eth_spe
*spe
;
1063 common
= bnx2x_is_contextless_ramrod(command
, cmd_type
);
1066 if (!atomic_load_acq_long(&sc
->eq_spq_left
)) {
1067 PMD_DRV_LOG(INFO
, sc
, "EQ ring is full!");
1071 if (!atomic_load_acq_long(&sc
->cq_spq_left
)) {
1072 PMD_DRV_LOG(INFO
, sc
, "SPQ ring is full!");
1077 spe
= bnx2x_sp_get_next(sc
);
	/* CID needs port number to be encoded in it */
1080 spe
->hdr
.conn_and_cmd_data
=
1081 htole32((command
<< SPE_HDR_CMD_ID_SHIFT
) | HW_CID(sc
, cid
));
1083 type
= (cmd_type
<< SPE_HDR_CONN_TYPE_SHIFT
) & SPE_HDR_CONN_TYPE
;
1085 /* TBD: Check if it works for VFs */
1086 type
|= ((SC_FUNC(sc
) << SPE_HDR_FUNCTION_ID_SHIFT
) &
1087 SPE_HDR_FUNCTION_ID
);
1089 spe
->hdr
.type
= htole16(type
);
1091 spe
->data
.update_data_addr
.hi
= htole32(data_hi
);
1092 spe
->data
.update_data_addr
.lo
= htole32(data_lo
);
	/*
	 * It's ok if the actual decrement is issued towards the memory
	 * somewhere between the lock and unlock. Thus no more explicit
	 * memory barrier is needed.
	 */
1100 atomic_subtract_acq_long(&sc
->eq_spq_left
, 1);
1102 atomic_subtract_acq_long(&sc
->cq_spq_left
, 1);
1105 PMD_DRV_LOG(DEBUG
, sc
,
1106 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x"
1107 "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
1109 (uint32_t) U64_HI(sc
->spq_dma
.paddr
),
1110 (uint32_t) (U64_LO(sc
->spq_dma
.paddr
) +
1111 (uint8_t *) sc
->spq_prod_bd
-
1112 (uint8_t *) sc
->spq
), command
, common
,
1113 HW_CID(sc
, cid
), data_hi
, data_lo
, type
,
1114 atomic_load_acq_long(&sc
->cq_spq_left
),
1115 atomic_load_acq_long(&sc
->eq_spq_left
));
1117 /* RAMROD completion is processed in bnx2x_intr_legacy()
1118 * which can run from different contexts.
1119 * Ask bnx2x_intr_intr() to process RAMROD
1120 * completion whenever it gets scheduled.
1122 rte_atomic32_set(&sc
->scan_fp
, 1);
1123 bnx2x_sp_prod_update(sc
);
1128 static void bnx2x_drv_pulse(struct bnx2x_softc
*sc
)
1130 SHMEM_WR(sc
, func_mb
[SC_FW_MB_IDX(sc
)].drv_pulse_mb
,
1131 sc
->fw_drv_pulse_wr_seq
);
1134 static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath
*fp
)
1137 struct bnx2x_tx_queue
*txq
= fp
->sc
->tx_queues
[fp
->index
];
1139 if (unlikely(!txq
)) {
1140 PMD_TX_LOG(ERR
, "ERROR: TX queue is NULL");
1144 mb(); /* status block fields can change */
1145 hw_cons
= le16toh(*fp
->tx_cons_sb
);
1146 return hw_cons
!= txq
->tx_pkt_head
;
1149 static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath
*fp
)
1151 /* expand this for multi-cos if ever supported */
1152 return bnx2x_tx_queue_has_work(fp
);
1155 static int bnx2x_has_rx_work(struct bnx2x_fastpath
*fp
)
1157 uint16_t rx_cq_cons_sb
;
1158 struct bnx2x_rx_queue
*rxq
;
1159 rxq
= fp
->sc
->rx_queues
[fp
->index
];
1160 if (unlikely(!rxq
)) {
1161 PMD_RX_LOG(ERR
, "ERROR: RX queue is NULL");
1165 mb(); /* status block fields can change */
1166 rx_cq_cons_sb
= le16toh(*fp
->rx_cq_cons_sb
);
1167 if (unlikely((rx_cq_cons_sb
& MAX_RCQ_ENTRIES(rxq
)) ==
1168 MAX_RCQ_ENTRIES(rxq
)))
1170 return rxq
->rx_cq_head
!= rx_cq_cons_sb
;
1174 bnx2x_sp_event(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
1175 union eth_rx_cqe
*rr_cqe
)
1177 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1178 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
1179 enum ecore_queue_cmd drv_cmd
= ECORE_Q_CMD_MAX
;
1180 struct ecore_queue_sp_obj
*q_obj
= &BNX2X_SP_OBJ(sc
, fp
).q_obj
;
1182 PMD_DRV_LOG(DEBUG
, sc
,
1183 "fp=%d cid=%d got ramrod #%d state is %x type is %d",
1184 fp
->index
, cid
, command
, sc
->state
,
1185 rr_cqe
->ramrod_cqe
.ramrod_type
);
1188 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE
):
1189 PMD_DRV_LOG(DEBUG
, sc
, "got UPDATE ramrod. CID %d", cid
);
1190 drv_cmd
= ECORE_Q_CMD_UPDATE
;
1193 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
):
1194 PMD_DRV_LOG(DEBUG
, sc
, "got MULTI[%d] setup ramrod", cid
);
1195 drv_cmd
= ECORE_Q_CMD_SETUP
;
1198 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP
):
1199 PMD_DRV_LOG(DEBUG
, sc
,
1200 "got MULTI[%d] tx-only setup ramrod", cid
);
1201 drv_cmd
= ECORE_Q_CMD_SETUP_TX_ONLY
;
1204 case (RAMROD_CMD_ID_ETH_HALT
):
1205 PMD_DRV_LOG(DEBUG
, sc
, "got MULTI[%d] halt ramrod", cid
);
1206 drv_cmd
= ECORE_Q_CMD_HALT
;
1209 case (RAMROD_CMD_ID_ETH_TERMINATE
):
1210 PMD_DRV_LOG(DEBUG
, sc
, "got MULTI[%d] teminate ramrod", cid
);
1211 drv_cmd
= ECORE_Q_CMD_TERMINATE
;
1214 case (RAMROD_CMD_ID_ETH_EMPTY
):
1215 PMD_DRV_LOG(DEBUG
, sc
, "got MULTI[%d] empty ramrod", cid
);
1216 drv_cmd
= ECORE_Q_CMD_EMPTY
;
1220 PMD_DRV_LOG(DEBUG
, sc
,
1221 "ERROR: unexpected MC reply (%d)"
1222 "on fp[%d]", command
, fp
->index
);
1226 if ((drv_cmd
!= ECORE_Q_CMD_MAX
) &&
1227 q_obj
->complete_cmd(sc
, q_obj
, drv_cmd
)) {
1229 * q_obj->complete_cmd() failure means that this was
1230 * an unexpected completion.
1232 * In this case we don't want to increase the sc->spq_left
1233 * because apparently we haven't sent this command the first
1236 // rte_panic("Unexpected SP completion");
1240 atomic_add_acq_long(&sc
->cq_spq_left
, 1);
1242 PMD_DRV_LOG(DEBUG
, sc
, "sc->cq_spq_left 0x%lx",
1243 atomic_load_acq_long(&sc
->cq_spq_left
));
1246 static uint8_t bnx2x_rxeof(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
)
1248 struct bnx2x_rx_queue
*rxq
;
1249 uint16_t bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
1250 uint16_t hw_cq_cons
, sw_cq_cons
, sw_cq_prod
;
1252 rxq
= sc
->rx_queues
[fp
->index
];
1254 PMD_RX_LOG(ERR
, "RX queue %d is NULL", fp
->index
);
1258 /* CQ "next element" is of the size of the regular element */
1259 hw_cq_cons
= le16toh(*fp
->rx_cq_cons_sb
);
1260 if (unlikely((hw_cq_cons
& USABLE_RCQ_ENTRIES_PER_PAGE
) ==
1261 USABLE_RCQ_ENTRIES_PER_PAGE
)) {
1265 bd_cons
= rxq
->rx_bd_head
;
1266 bd_prod
= rxq
->rx_bd_tail
;
1267 bd_prod_fw
= bd_prod
;
1268 sw_cq_cons
= rxq
->rx_cq_head
;
1269 sw_cq_prod
= rxq
->rx_cq_tail
;
1272 * Memory barrier necessary as speculative reads of the rx
1273 * buffer can be ahead of the index in the status block
1277 while (sw_cq_cons
!= hw_cq_cons
) {
1278 union eth_rx_cqe
*cqe
;
1279 struct eth_fast_path_rx_cqe
*cqe_fp
;
1280 uint8_t cqe_fp_flags
;
1281 enum eth_rx_cqe_type cqe_fp_type
;
1283 comp_ring_cons
= RCQ_ENTRY(sw_cq_cons
, rxq
);
1284 bd_prod
= RX_BD(bd_prod
, rxq
);
1285 bd_cons
= RX_BD(bd_cons
, rxq
);
1287 cqe
= &rxq
->cq_ring
[comp_ring_cons
];
1288 cqe_fp
= &cqe
->fast_path_cqe
;
1289 cqe_fp_flags
= cqe_fp
->type_error_flags
;
1290 cqe_fp_type
= cqe_fp_flags
& ETH_FAST_PATH_RX_CQE_TYPE
;
1292 /* is this a slowpath msg? */
1293 if (CQE_TYPE_SLOW(cqe_fp_type
)) {
1294 bnx2x_sp_event(sc
, fp
, cqe
);
1298 /* is this an error packet? */
1299 if (unlikely(cqe_fp_flags
&
1300 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
)) {
1301 PMD_RX_LOG(DEBUG
, "flags 0x%x rx packet %u",
1302 cqe_fp_flags
, sw_cq_cons
);
1306 PMD_RX_LOG(DEBUG
, "Dropping fastpath called from attn poller!");
1309 bd_cons
= NEXT_RX_BD(bd_cons
);
1310 bd_prod
= NEXT_RX_BD(bd_prod
);
1311 bd_prod_fw
= NEXT_RX_BD(bd_prod_fw
);
1314 sw_cq_prod
= NEXT_RCQ_IDX(sw_cq_prod
);
1315 sw_cq_cons
= NEXT_RCQ_IDX(sw_cq_cons
);
1317 } /* while work to do */
1319 rxq
->rx_bd_head
= bd_cons
;
1320 rxq
->rx_bd_tail
= bd_prod_fw
;
1321 rxq
->rx_cq_head
= sw_cq_cons
;
1322 rxq
->rx_cq_tail
= sw_cq_prod
;
1324 /* Update producers */
1325 bnx2x_update_rx_prod(sc
, fp
, bd_prod_fw
, sw_cq_prod
);
1327 return sw_cq_cons
!= hw_cq_cons
;
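/*
 * Descriptive note: bnx2x_rxeof() above keeps software head/tail indices for
 * both the RX BD ring (rx_bd_head/rx_bd_tail) and the completion queue
 * (rx_cq_head/rx_cq_tail).  It walks the CQ until the software consumer
 * catches up with the hardware consumer read from the status block, hands
 * slow-path CQEs to bnx2x_sp_event(), and finally republishes the producers
 * through bnx2x_update_rx_prod().
 */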
1331 bnx2x_free_tx_pkt(__rte_unused
struct bnx2x_fastpath
*fp
, struct bnx2x_tx_queue
*txq
,
1332 uint16_t pkt_idx
, uint16_t bd_idx
)
1334 struct eth_tx_start_bd
*tx_start_bd
=
1335 &txq
->tx_ring
[TX_BD(bd_idx
, txq
)].start_bd
;
1336 uint16_t nbd
= rte_le_to_cpu_16(tx_start_bd
->nbd
);
1337 struct rte_mbuf
*tx_mbuf
= txq
->sw_ring
[TX_BD(pkt_idx
, txq
)];
1339 if (likely(tx_mbuf
!= NULL
)) {
1340 rte_pktmbuf_free_seg(tx_mbuf
);
1342 PMD_RX_LOG(ERR
, "fp[%02d] lost mbuf %lu",
1343 fp
->index
, (unsigned long)TX_BD(pkt_idx
, txq
));
1346 txq
->sw_ring
[TX_BD(pkt_idx
, txq
)] = NULL
;
1347 txq
->nb_tx_avail
+= nbd
;
1350 bd_idx
= NEXT_TX_BD(bd_idx
);
1355 /* processes transmit completions */
1356 uint8_t bnx2x_txeof(__rte_unused
struct bnx2x_softc
* sc
, struct bnx2x_fastpath
* fp
)
1358 uint16_t bd_cons
, hw_cons
, sw_cons
;
1359 __rte_unused
uint16_t tx_bd_avail
;
1361 struct bnx2x_tx_queue
*txq
= fp
->sc
->tx_queues
[fp
->index
];
1363 if (unlikely(!txq
)) {
1364 PMD_TX_LOG(ERR
, "ERROR: TX queue is NULL");
1368 bd_cons
= txq
->tx_bd_head
;
1369 hw_cons
= rte_le_to_cpu_16(*fp
->tx_cons_sb
);
1370 sw_cons
= txq
->tx_pkt_head
;
1372 while (sw_cons
!= hw_cons
) {
1373 bd_cons
= bnx2x_free_tx_pkt(fp
, txq
, sw_cons
, bd_cons
);
1377 txq
->tx_pkt_head
= sw_cons
;
1378 txq
->tx_bd_head
= bd_cons
;
1380 tx_bd_avail
= txq
->nb_tx_avail
;
1382 PMD_TX_LOG(DEBUG
, "fp[%02d] avail=%u cons_sb=%u, "
1383 "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u",
1384 fp
->index
, tx_bd_avail
, hw_cons
,
1385 txq
->tx_pkt_head
, txq
->tx_pkt_tail
,
1386 txq
->tx_bd_head
, txq
->tx_bd_tail
);
1390 static void bnx2x_drain_tx_queues(struct bnx2x_softc
*sc
)
1392 struct bnx2x_fastpath
*fp
;
1395 /* wait until all TX fastpath tasks have completed */
1396 for (i
= 0; i
< sc
->num_queues
; i
++) {
1401 while (bnx2x_has_tx_work(fp
)) {
1402 bnx2x_txeof(sc
, fp
);
1406 "Timeout waiting for fp[%d] "
1407 "transmits to complete!", i
);
1408 rte_panic("tx drain failure");
1422 bnx2x_del_all_macs(struct bnx2x_softc
*sc
, struct ecore_vlan_mac_obj
*mac_obj
,
1423 int mac_type
, uint8_t wait_for_comp
)
1425 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
1428 /* wait for completion of requested */
1429 if (wait_for_comp
) {
1430 bnx2x_set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
1433 /* Set the mac type of addresses we want to clear */
1434 bnx2x_set_bit(mac_type
, &vlan_mac_flags
);
1436 rc
= mac_obj
->delete_all(sc
, mac_obj
, &vlan_mac_flags
, &ramrod_flags
);
1438 PMD_DRV_LOG(ERR
, sc
, "Failed to delete MACs (%d)", rc
);
1444 bnx2x_fill_accept_flags(struct bnx2x_softc
*sc
, uint32_t rx_mode
,
1445 unsigned long *rx_accept_flags
,
1446 unsigned long *tx_accept_flags
)
1448 /* Clear the flags first */
1449 *rx_accept_flags
= 0;
1450 *tx_accept_flags
= 0;
1453 case BNX2X_RX_MODE_NONE
:
1455 * 'drop all' supersedes any accept flags that may have been
1456 * passed to the function.
1460 case BNX2X_RX_MODE_NORMAL
:
1461 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, rx_accept_flags
);
1462 bnx2x_set_bit(ECORE_ACCEPT_MULTICAST
, rx_accept_flags
);
1463 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, rx_accept_flags
);
1465 /* internal switching mode */
1466 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, tx_accept_flags
);
1467 bnx2x_set_bit(ECORE_ACCEPT_MULTICAST
, tx_accept_flags
);
1468 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, tx_accept_flags
);
1472 case BNX2X_RX_MODE_ALLMULTI
:
1473 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, rx_accept_flags
);
1474 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST
, rx_accept_flags
);
1475 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, rx_accept_flags
);
1477 /* internal switching mode */
1478 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, tx_accept_flags
);
1479 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST
, tx_accept_flags
);
1480 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, tx_accept_flags
);
1484 case BNX2X_RX_MODE_ALLMULTI_PROMISC
:
1485 case BNX2X_RX_MODE_PROMISC
:
* According to the definition of SI mode, an iface in promisc mode
1488 * should receive matched and unmatched (in resolution of port)
1491 bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED
, rx_accept_flags
);
1492 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, rx_accept_flags
);
1493 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST
, rx_accept_flags
);
1494 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, rx_accept_flags
);
1496 /* internal switching mode */
1497 bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST
, tx_accept_flags
);
1498 bnx2x_set_bit(ECORE_ACCEPT_BROADCAST
, tx_accept_flags
);
1501 bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST
, tx_accept_flags
);
1503 bnx2x_set_bit(ECORE_ACCEPT_UNICAST
, tx_accept_flags
);
1509 PMD_RX_LOG(ERR
, "Unknown rx_mode (%d)", rx_mode
);
1513 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
1514 if (rx_mode
!= BNX2X_RX_MODE_NONE
) {
1515 bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN
, rx_accept_flags
);
1516 bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN
, tx_accept_flags
);
1523 bnx2x_set_q_rx_mode(struct bnx2x_softc
*sc
, uint8_t cl_id
,
1524 unsigned long rx_mode_flags
,
1525 unsigned long rx_accept_flags
,
1526 unsigned long tx_accept_flags
, unsigned long ramrod_flags
)
1528 struct ecore_rx_mode_ramrod_params ramrod_param
;
1531 memset(&ramrod_param
, 0, sizeof(ramrod_param
));
1533 /* Prepare ramrod parameters */
1534 ramrod_param
.cid
= 0;
1535 ramrod_param
.cl_id
= cl_id
;
1536 ramrod_param
.rx_mode_obj
= &sc
->rx_mode_obj
;
1537 ramrod_param
.func_id
= SC_FUNC(sc
);
1539 ramrod_param
.pstate
= &sc
->sp_state
;
1540 ramrod_param
.state
= ECORE_FILTER_RX_MODE_PENDING
;
1542 ramrod_param
.rdata
= BNX2X_SP(sc
, rx_mode_rdata
);
1543 ramrod_param
.rdata_mapping
=
1544 (rte_iova_t
)BNX2X_SP_MAPPING(sc
, rx_mode_rdata
),
1545 bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING
, &sc
->sp_state
);
1547 ramrod_param
.ramrod_flags
= ramrod_flags
;
1548 ramrod_param
.rx_mode_flags
= rx_mode_flags
;
1550 ramrod_param
.rx_accept_flags
= rx_accept_flags
;
1551 ramrod_param
.tx_accept_flags
= tx_accept_flags
;
1553 rc
= ecore_config_rx_mode(sc
, &ramrod_param
);
1555 PMD_RX_LOG(ERR
, "Set rx_mode %d failed", sc
->rx_mode
);
1562 int bnx2x_set_storm_rx_mode(struct bnx2x_softc
*sc
)
1564 unsigned long rx_mode_flags
= 0, ramrod_flags
= 0;
1565 unsigned long rx_accept_flags
= 0, tx_accept_flags
= 0;
1568 rc
= bnx2x_fill_accept_flags(sc
, sc
->rx_mode
, &rx_accept_flags
,
1574 bnx2x_set_bit(RAMROD_RX
, &ramrod_flags
);
1575 bnx2x_set_bit(RAMROD_TX
, &ramrod_flags
);
1576 bnx2x_set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
1578 return bnx2x_set_q_rx_mode(sc
, sc
->fp
[0].cl_id
, rx_mode_flags
,
1579 rx_accept_flags
, tx_accept_flags
,
1583 /* returns the "mcp load_code" according to global load_count array */
1584 static int bnx2x_nic_load_no_mcp(struct bnx2x_softc
*sc
)
1586 int path
= SC_PATH(sc
);
1587 int port
= SC_PORT(sc
);
1589 PMD_DRV_LOG(INFO
, sc
, "NO MCP - load counts[%d] %d, %d, %d",
1590 path
, load_count
[path
][0], load_count
[path
][1],
1591 load_count
[path
][2]);
1593 load_count
[path
][0]++;
1594 load_count
[path
][1 + port
]++;
1595 PMD_DRV_LOG(INFO
, sc
, "NO MCP - new load counts[%d] %d, %d, %d",
1596 path
, load_count
[path
][0], load_count
[path
][1],
1597 load_count
[path
][2]);
1598 if (load_count
[path
][0] == 1)
1599 return FW_MSG_CODE_DRV_LOAD_COMMON
;
1600 else if (load_count
[path
][1 + port
] == 1)
1601 return FW_MSG_CODE_DRV_LOAD_PORT
;
1603 return FW_MSG_CODE_DRV_LOAD_FUNCTION
;
1606 /* returns the "mcp load_code" according to global load_count array */
1607 static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc
*sc
)
1609 int port
= SC_PORT(sc
);
1610 int path
= SC_PATH(sc
);
1612 PMD_DRV_LOG(INFO
, sc
, "NO MCP - load counts[%d] %d, %d, %d",
1613 path
, load_count
[path
][0], load_count
[path
][1],
1614 load_count
[path
][2]);
1615 load_count
[path
][0]--;
1616 load_count
[path
][1 + port
]--;
1617 PMD_DRV_LOG(INFO
, sc
, "NO MCP - new load counts[%d] %d, %d, %d",
1618 path
, load_count
[path
][0], load_count
[path
][1],
1619 load_count
[path
][2]);
1620 if (load_count
[path
][0] == 0) {
1621 return FW_MSG_CODE_DRV_UNLOAD_COMMON
;
1622 } else if (load_count
[path
][1 + port
] == 0) {
1623 return FW_MSG_CODE_DRV_UNLOAD_PORT
;
1625 return FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
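/*
 * Descriptive note for the no-MCP helpers above: load_count[path] tracks, per
 * engine, how many driver instances are loaded in total ([0]) and per port
 * ([1 + port]).  The first bnx2x_nic_load_no_mcp() on a path therefore
 * reports FW_MSG_CODE_DRV_LOAD_COMMON, the first on a given port reports
 * FW_MSG_CODE_DRV_LOAD_PORT, and every later function only gets
 * FW_MSG_CODE_DRV_LOAD_FUNCTION; the unload helper mirrors this as the
 * counters drop back to zero.
 */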
1629 /* request unload mode from the MCP: COMMON, PORT or FUNCTION */
1630 static uint32_t bnx2x_send_unload_req(struct bnx2x_softc
*sc
, int unload_mode
)
1632 uint32_t reset_code
= 0;
1634 /* Select the UNLOAD request mode */
1635 if (unload_mode
== UNLOAD_NORMAL
) {
1636 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
1638 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
1641 /* Send the request to the MCP */
1642 if (!BNX2X_NOMCP(sc
)) {
1643 reset_code
= bnx2x_fw_command(sc
, reset_code
, 0);
1645 reset_code
= bnx2x_nic_unload_no_mcp(sc
);
1651 /* send UNLOAD_DONE command to the MCP */
1652 static void bnx2x_send_unload_done(struct bnx2x_softc
*sc
, uint8_t keep_link
)
1654 uint32_t reset_param
=
1655 keep_link
? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET
: 0;
1657 /* Report UNLOAD_DONE to MCP */
1658 if (!BNX2X_NOMCP(sc
)) {
1659 bnx2x_fw_command(sc
, DRV_MSG_CODE_UNLOAD_DONE
, reset_param
);
1663 static int bnx2x_func_wait_started(struct bnx2x_softc
*sc
)
1667 if (!sc
->port
.pmf
) {
	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF is probably in the middle of a TX disable/enable transaction.
	 * 1. Sync IRS for default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait until the TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if DCBX attention was scheduled it already changed
	 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
	 * already received completion for the transaction, the state is
	 * TX_STOPPED. State will return to STARTED after completion of the
	 * TX_STOPPED-->STARTED transition.
	 */
1685 while (ecore_func_get_state(sc
, &sc
->func_obj
) !=
1686 ECORE_F_STATE_STARTED
&& tout
--) {
1690 if (ecore_func_get_state(sc
, &sc
->func_obj
) != ECORE_F_STATE_STARTED
) {
1692 * Failed to complete the transaction in a "good way"
1693 * Force both transactions with CLR bit.
1695 struct ecore_func_state_params func_params
= { NULL
};
1697 PMD_DRV_LOG(NOTICE
, sc
, "Unexpected function state! "
1698 "Forcing STARTED-->TX_STOPPED-->STARTED");
1700 func_params
.f_obj
= &sc
->func_obj
;
1701 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY
, &func_params
.ramrod_flags
);
1703 /* STARTED-->TX_STOPPED */
1704 func_params
.cmd
= ECORE_F_CMD_TX_STOP
;
1705 ecore_func_state_change(sc
, &func_params
);
1707 /* TX_STOPPED-->STARTED */
1708 func_params
.cmd
= ECORE_F_CMD_TX_START
;
1709 return ecore_func_state_change(sc
, &func_params
);
1715 static int bnx2x_stop_queue(struct bnx2x_softc
*sc
, int index
)
1717 struct bnx2x_fastpath
*fp
= &sc
->fp
[index
];
1718 struct ecore_queue_state_params q_params
= { NULL
};
1721 PMD_DRV_LOG(DEBUG
, sc
, "stopping queue %d cid %d", index
, fp
->index
);
1723 q_params
.q_obj
= &sc
->sp_objs
[fp
->index
].q_obj
;
1724 /* We want to wait for completion in this context */
1725 bnx2x_set_bit(RAMROD_COMP_WAIT
, &q_params
.ramrod_flags
);
1727 /* Stop the primary connection: */
1729 /* ...halt the connection */
1730 q_params
.cmd
= ECORE_Q_CMD_HALT
;
1731 rc
= ecore_queue_state_change(sc
, &q_params
);
1736 /* ...terminate the connection */
1737 q_params
.cmd
= ECORE_Q_CMD_TERMINATE
;
1738 memset(&q_params
.params
.terminate
, 0,
1739 sizeof(q_params
.params
.terminate
));
1740 q_params
.params
.terminate
.cid_index
= FIRST_TX_COS_INDEX
;
1741 rc
= ecore_queue_state_change(sc
, &q_params
);
1746 /* ...delete cfc entry */
1747 q_params
.cmd
= ECORE_Q_CMD_CFC_DEL
;
1748 memset(&q_params
.params
.cfc_del
, 0, sizeof(q_params
.params
.cfc_del
));
1749 q_params
.params
.cfc_del
.cid_index
= FIRST_TX_COS_INDEX
;
1750 return ecore_queue_state_change(sc
, &q_params
);
1753 /* wait for the outstanding SP commands */
1754 static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc
*sc
, unsigned long mask
)
1757 int tout
= 5000; /* wait for 5 secs tops */
1761 if (!(atomic_load_acq_long(&sc
->sp_state
) & mask
)) {
1770 tmp
= atomic_load_acq_long(&sc
->sp_state
);
1772 PMD_DRV_LOG(INFO
, sc
, "Filtering completion timed out: "
1773 "sp_state 0x%lx, mask 0x%lx", tmp
, mask
);
1780 static int bnx2x_func_stop(struct bnx2x_softc
*sc
)
1782 struct ecore_func_state_params func_params
= { NULL
};
1785 /* prepare parameters for function state transitions */
1786 bnx2x_set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
1787 func_params
.f_obj
= &sc
->func_obj
;
1788 func_params
.cmd
= ECORE_F_CMD_STOP
;
1791 * Try to stop the function the 'good way'. If it fails (in case
1792 * of a parity error during bnx2x_chip_cleanup()) and we are
1793 * not in a debug mode, perform a state transaction in order to
1794 * enable further HW_RESET transaction.
1796 rc
= ecore_func_state_change(sc
, &func_params
);
1798 PMD_DRV_LOG(NOTICE
, sc
, "FUNC_STOP ramrod failed. "
1799 "Running a dry transaction");
1800 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY
, &func_params
.ramrod_flags
);
1801 return ecore_func_state_change(sc
, &func_params
);
1807 static int bnx2x_reset_hw(struct bnx2x_softc
*sc
, uint32_t load_code
)
1809 struct ecore_func_state_params func_params
= { NULL
};
1811 /* Prepare parameters for function state transitions */
1812 bnx2x_set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
1814 func_params
.f_obj
= &sc
->func_obj
;
1815 func_params
.cmd
= ECORE_F_CMD_HW_RESET
;
1817 func_params
.params
.hw_init
.load_phase
= load_code
;
1819 return ecore_func_state_change(sc
, &func_params
);
1822 static void bnx2x_int_disable_sync(struct bnx2x_softc
*sc
, int disable_hw
)
1825 /* prevent the HW from sending interrupts */
1826 bnx2x_int_disable(sc
);
1831 bnx2x_chip_cleanup(struct bnx2x_softc
*sc
, uint32_t unload_mode
, uint8_t keep_link
)
1833 int port
= SC_PORT(sc
);
1834 struct ecore_mcast_ramrod_params rparam
= { NULL
};
1835 uint32_t reset_code
;
1838 bnx2x_drain_tx_queues(sc
);
1840 /* give HW time to discard old tx messages */
1843 /* Clean all ETH MACs */
1844 rc
= bnx2x_del_all_macs(sc
, &sc
->sp_objs
[0].mac_obj
, ECORE_ETH_MAC
,
1847 PMD_DRV_LOG(NOTICE
, sc
,
1848 "Failed to delete all ETH MACs (%d)", rc
);
1851 /* Clean up UC list */
1852 rc
= bnx2x_del_all_macs(sc
, &sc
->sp_objs
[0].mac_obj
, ECORE_UC_LIST_MAC
,
1855 PMD_DRV_LOG(NOTICE
, sc
,
1856 "Failed to delete UC MACs list (%d)", rc
);
1860 REG_WR(sc
, NIG_REG_LLH0_FUNC_EN
+ port
* 8, 0);
1862 /* Set "drop all" to stop Rx */
1865 * We need to take the if_maddr_lock() here in order to prevent
1866 * a race between the completion code and this code.
1869 if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING
, &sc
->sp_state
)) {
1870 bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED
, &sc
->sp_state
);
1872 bnx2x_set_storm_rx_mode(sc
);
1875 /* Clean up multicast configuration */
1876 rparam
.mcast_obj
= &sc
->mcast_obj
;
1877 rc
= ecore_config_mcast(sc
, &rparam
, ECORE_MCAST_CMD_DEL
);
1879 PMD_DRV_LOG(NOTICE
, sc
,
1880 "Failed to send DEL MCAST command (%d)", rc
);
* Send the UNLOAD_REQUEST to the MCP. This will return whether
* this function should perform a FUNCTION, PORT, or COMMON HW reset.
1888 reset_code
= bnx2x_send_unload_req(sc
, unload_mode
);
1891 * (assumption: No Attention from MCP at this stage)
1892 * PMF probably in the middle of TX disable/enable transaction
1894 rc
= bnx2x_func_wait_started(sc
);
1896 PMD_DRV_LOG(NOTICE
, sc
, "bnx2x_func_wait_started failed");
1900 * Close multi and leading connections
1901 * Completions for ramrods are collected in a synchronous way
1903 for (i
= 0; i
< sc
->num_queues
; i
++) {
1904 if (bnx2x_stop_queue(sc
, i
)) {
1910 * If SP settings didn't get completed so far - something
* very wrong has happened.
1913 if (!bnx2x_wait_sp_comp(sc
, ~0x0UL
)) {
1914 PMD_DRV_LOG(NOTICE
, sc
, "Common slow path ramrods got stuck!");
1919 rc
= bnx2x_func_stop(sc
);
1921 PMD_DRV_LOG(NOTICE
, sc
, "Function stop failed!");
1924 /* disable HW interrupts */
1925 bnx2x_int_disable_sync(sc
, TRUE
);
1927 /* Reset the chip */
1928 rc
= bnx2x_reset_hw(sc
, reset_code
);
1930 PMD_DRV_LOG(NOTICE
, sc
, "Hardware reset failed");
1933 /* Report UNLOAD_DONE to MCP */
1934 bnx2x_send_unload_done(sc
, keep_link
);
1937 static void bnx2x_disable_close_the_gate(struct bnx2x_softc
*sc
)
1941 PMD_DRV_LOG(DEBUG
, sc
, "Disabling 'close the gates'");
1943 val
= REG_RD(sc
, MISC_REG_AEU_GENERAL_MASK
);
1944 val
&= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK
|
1945 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK
);
1946 REG_WR(sc
, MISC_REG_AEU_GENERAL_MASK
, val
);
/*
 * Cleans the objects that have internal lists, without sending
 * ramrods. Should be run when interrupts are disabled.
 */
1953 static void bnx2x_squeeze_objects(struct bnx2x_softc
*sc
)
1955 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
1956 struct ecore_mcast_ramrod_params rparam
= { NULL
};
1957 struct ecore_vlan_mac_obj
*mac_obj
= &sc
->sp_objs
->mac_obj
;
1960 /* Cleanup MACs' object first... */
1962 /* Wait for completion of requested */
1963 bnx2x_set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
1964 /* Perform a dry cleanup */
1965 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY
, &ramrod_flags
);
1967 /* Clean ETH primary MAC */
1968 bnx2x_set_bit(ECORE_ETH_MAC
, &vlan_mac_flags
);
1969 rc
= mac_obj
->delete_all(sc
, &sc
->sp_objs
->mac_obj
, &vlan_mac_flags
,
1972 PMD_DRV_LOG(NOTICE
, sc
, "Failed to clean ETH MACs (%d)", rc
);
1975 /* Cleanup UC list */
1977 bnx2x_set_bit(ECORE_UC_LIST_MAC
, &vlan_mac_flags
);
1978 rc
= mac_obj
->delete_all(sc
, mac_obj
, &vlan_mac_flags
, &ramrod_flags
);
1980 PMD_DRV_LOG(NOTICE
, sc
,
1981 "Failed to clean UC list MACs (%d)", rc
);
1984 /* Now clean mcast object... */
1986 rparam
.mcast_obj
= &sc
->mcast_obj
;
1987 bnx2x_set_bit(RAMROD_DRV_CLR_ONLY
, &rparam
.ramrod_flags
);
1989 /* Add a DEL command... */
1990 rc
= ecore_config_mcast(sc
, &rparam
, ECORE_MCAST_CMD_DEL
);
1992 PMD_DRV_LOG(NOTICE
, sc
,
1993 "Failed to send DEL MCAST command (%d)", rc
);
1996 /* now wait until all pending commands are cleared */
1998 rc
= ecore_config_mcast(sc
, &rparam
, ECORE_MCAST_CMD_CONT
);
2001 PMD_DRV_LOG(NOTICE
, sc
,
2002 "Failed to clean MCAST object (%d)", rc
);
2006 rc
= ecore_config_mcast(sc
, &rparam
, ECORE_MCAST_CMD_CONT
);
2010 /* stop the controller */
2013 bnx2x_nic_unload(struct bnx2x_softc
*sc
, uint32_t unload_mode
, uint8_t keep_link
)
2015 uint8_t global
= FALSE
;
2018 PMD_DRV_LOG(DEBUG
, sc
, "Starting NIC unload...");
2020 /* mark driver as unloaded in shmem2 */
2021 if (IS_PF(sc
) && SHMEM2_HAS(sc
, drv_capabilities_flag
)) {
2022 val
= SHMEM2_RD(sc
, drv_capabilities_flag
[SC_FW_MB_IDX(sc
)]);
2023 SHMEM2_WR(sc
, drv_capabilities_flag
[SC_FW_MB_IDX(sc
)],
2024 val
& ~DRV_FLAGS_CAPABILITIES_LOADED_L2
);
2027 if (IS_PF(sc
) && sc
->recovery_state
!= BNX2X_RECOVERY_DONE
&&
2028 (sc
->state
== BNX2X_STATE_CLOSED
|| sc
->state
== BNX2X_STATE_ERROR
)) {
2030 * We can get here if the driver has been unloaded
2031 * during parity error recovery and is either waiting for a
2032 * leader to complete or for other functions to unload and
2033 * then ifconfig down has been issued. In this case we want to
* unload and let other functions complete a recovery
2037 sc
->recovery_state
= BNX2X_RECOVERY_DONE
;
2039 bnx2x_release_leader_lock(sc
);
2042 PMD_DRV_LOG(NOTICE
, sc
, "Can't unload in closed or error state");
2047 * Nothing to do during unload if previous bnx2x_nic_load()
* did not complete successfully - all resources are released.
2050 if ((sc
->state
== BNX2X_STATE_CLOSED
) || (sc
->state
== BNX2X_STATE_ERROR
)) {
2054 sc
->state
= BNX2X_STATE_CLOSING_WAITING_HALT
;
2057 sc
->rx_mode
= BNX2X_RX_MODE_NONE
;
2058 bnx2x_set_rx_mode(sc
);
2062 /* set ALWAYS_ALIVE bit in shmem */
2063 sc
->fw_drv_pulse_wr_seq
|= DRV_PULSE_ALWAYS_ALIVE
;
2065 bnx2x_drv_pulse(sc
);
2067 bnx2x_stats_handle(sc
, STATS_EVENT_STOP
);
2068 bnx2x_save_statistics(sc
);
2071 /* wait till consumers catch up with producers in all queues */
2072 bnx2x_drain_tx_queues(sc
);
	/* if VF, indicate to the PF that this function is going down (the PF
	 * will delete sp elements and clear initializations)
	 */
2078 bnx2x_vf_unload(sc
);
2079 } else if (unload_mode
!= UNLOAD_RECOVERY
) {
2080 /* if this is a normal/close unload need to clean up chip */
2081 bnx2x_chip_cleanup(sc
, unload_mode
, keep_link
);
2083 /* Send the UNLOAD_REQUEST to the MCP */
2084 bnx2x_send_unload_req(sc
, unload_mode
);
2087 * Prevent transactions to host from the functions on the
2088 * engine that doesn't reset global blocks in case of global
* attention once global blocks are reset and gates are opened
2090 * (the engine which leader will perform the recovery
2093 if (!CHIP_IS_E1x(sc
)) {
2094 bnx2x_pf_disable(sc
);
2097 /* disable HW interrupts */
2098 bnx2x_int_disable_sync(sc
, TRUE
);
2100 /* Report UNLOAD_DONE to MCP */
2101 bnx2x_send_unload_done(sc
, FALSE
);
2105 * At this stage no more interrupts will arrive so we may safely clean
2106 * the queue'able objects here in case they failed to get cleaned so far.
2109 bnx2x_squeeze_objects(sc
);
2112 /* There should be no more pending SP commands at this stage */
2121 bnx2x_free_fw_stats_mem(sc
);
2123 sc
->state
= BNX2X_STATE_CLOSED
;
2126 * Check if there are pending parity attentions. If there are - set
2127 * RECOVERY_IN_PROGRESS.
2129 if (IS_PF(sc
) && bnx2x_chk_parity_attn(sc
, &global
, FALSE
)) {
2130 bnx2x_set_reset_in_progress(sc
);
2132 /* Set RESET_IS_GLOBAL if needed */
2134 bnx2x_set_reset_global(sc
);
2139 * The last driver must disable a "close the gate" if there is no
2140 * parity attention or "process kill" pending.
2142 if (IS_PF(sc
) && !bnx2x_clear_pf_load(sc
) &&
2143 bnx2x_reset_is_done(sc
, SC_PATH(sc
))) {
2144 bnx2x_disable_close_the_gate(sc
);
2147 PMD_DRV_LOG(DEBUG
, sc
, "Ended NIC unload");
/*
 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
 * visible to the controller.
 *
 * If an mbuf is submitted to this routine and cannot be given to the
 * controller (e.g. it has too many fragments) then the function may free
 * the mbuf and return to the caller.
 *
 * Returns:
 *   int: Number of TX BDs used for the mbuf
 *
 * Note the side effect that an mbuf may be freed if it causes a problem.
 */
2165 int bnx2x_tx_encap(struct bnx2x_tx_queue
*txq
, struct rte_mbuf
*m0
)
2167 struct eth_tx_start_bd
*tx_start_bd
;
2168 uint16_t bd_prod
, pkt_prod
;
2169 struct bnx2x_softc
*sc
;
2173 bd_prod
= txq
->tx_bd_tail
;
2174 pkt_prod
= txq
->tx_pkt_tail
;
2176 txq
->sw_ring
[TX_BD(pkt_prod
, txq
)] = m0
;
2178 tx_start_bd
= &txq
->tx_ring
[TX_BD(bd_prod
, txq
)].start_bd
;
2181 rte_cpu_to_le_64(rte_mbuf_data_iova(m0
));
2182 tx_start_bd
->nbytes
= rte_cpu_to_le_16(m0
->data_len
);
2183 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
2184 tx_start_bd
->general_data
=
2185 (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT
);
2187 tx_start_bd
->nbd
= rte_cpu_to_le_16(2);
2189 if (m0
->ol_flags
& PKT_TX_VLAN_PKT
) {
2190 tx_start_bd
->vlan_or_ethertype
=
2191 rte_cpu_to_le_16(m0
->vlan_tci
);
2192 tx_start_bd
->bd_flags
.as_bitfield
|=
2193 (X_ETH_OUTBAND_VLAN
<<
2194 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
2197 tx_start_bd
->vlan_or_ethertype
=
2198 rte_cpu_to_le_16(pkt_prod
);
2200 struct ether_hdr
*eh
=
2201 rte_pktmbuf_mtod(m0
, struct ether_hdr
*);
2203 tx_start_bd
->vlan_or_ethertype
=
2204 rte_cpu_to_le_16(rte_be_to_cpu_16(eh
->ether_type
));
2208 bd_prod
= NEXT_TX_BD(bd_prod
);
2210 struct eth_tx_parse_bd_e2
*tx_parse_bd
;
2211 const struct ether_hdr
*eh
=
2212 rte_pktmbuf_mtod(m0
, struct ether_hdr
*);
2213 uint8_t mac_type
= UNICAST_ADDRESS
;
2216 &txq
->tx_ring
[TX_BD(bd_prod
, txq
)].parse_bd_e2
;
2217 if (is_multicast_ether_addr(&eh
->d_addr
)) {
2218 if (is_broadcast_ether_addr(&eh
->d_addr
))
2219 mac_type
= BROADCAST_ADDRESS
;
2221 mac_type
= MULTICAST_ADDRESS
;
2223 tx_parse_bd
->parsing_data
=
2224 (mac_type
<< ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT
);
2226 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.dst_hi
,
2227 &eh
->d_addr
.addr_bytes
[0], 2);
2228 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.dst_mid
,
2229 &eh
->d_addr
.addr_bytes
[2], 2);
2230 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.dst_lo
,
2231 &eh
->d_addr
.addr_bytes
[4], 2);
2232 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.src_hi
,
2233 &eh
->s_addr
.addr_bytes
[0], 2);
2234 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.src_mid
,
2235 &eh
->s_addr
.addr_bytes
[2], 2);
2236 rte_memcpy(&tx_parse_bd
->data
.mac_addr
.src_lo
,
2237 &eh
->s_addr
.addr_bytes
[4], 2);
2239 tx_parse_bd
->data
.mac_addr
.dst_hi
=
2240 rte_cpu_to_be_16(tx_parse_bd
->data
.mac_addr
.dst_hi
);
2241 tx_parse_bd
->data
.mac_addr
.dst_mid
=
2242 rte_cpu_to_be_16(tx_parse_bd
->data
.
2244 tx_parse_bd
->data
.mac_addr
.dst_lo
=
2245 rte_cpu_to_be_16(tx_parse_bd
->data
.mac_addr
.dst_lo
);
2246 tx_parse_bd
->data
.mac_addr
.src_hi
=
2247 rte_cpu_to_be_16(tx_parse_bd
->data
.mac_addr
.src_hi
);
2248 tx_parse_bd
->data
.mac_addr
.src_mid
=
2249 rte_cpu_to_be_16(tx_parse_bd
->data
.
2251 tx_parse_bd
->data
.mac_addr
.src_lo
=
2252 rte_cpu_to_be_16(tx_parse_bd
->data
.mac_addr
.src_lo
);
2255 "PBD dst %x %x %x src %x %x %x p_data %x",
2256 tx_parse_bd
->data
.mac_addr
.dst_hi
,
2257 tx_parse_bd
->data
.mac_addr
.dst_mid
,
2258 tx_parse_bd
->data
.mac_addr
.dst_lo
,
2259 tx_parse_bd
->data
.mac_addr
.src_hi
,
2260 tx_parse_bd
->data
.mac_addr
.src_mid
,
2261 tx_parse_bd
->data
.mac_addr
.src_lo
,
2262 tx_parse_bd
->parsing_data
);
2266 "start bd: nbytes %d flags %x vlan %x",
2267 tx_start_bd
->nbytes
,
2268 tx_start_bd
->bd_flags
.as_bitfield
,
2269 tx_start_bd
->vlan_or_ethertype
);
2271 bd_prod
= NEXT_TX_BD(bd_prod
);
2274 if (TX_IDX(bd_prod
) < 2)
2277 txq
->nb_tx_avail
-= 2;
2278 txq
->tx_bd_tail
= bd_prod
;
2279 txq
->tx_pkt_tail
= pkt_prod
;
static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
{
    return L2_ILT_LINES(sc);
}
static void bnx2x_ilt_set_info(struct bnx2x_softc *sc)
{
    struct ilt_client_info *ilt_client;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t line = 0;

    PMD_INIT_FUNC_TRACE(sc);

    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));

    /* CDU */
    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
    ilt_client->client_num = ILT_CLIENT_CDU;
    ilt_client->page_size = CDU_ILT_PAGE_SZ;
    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
    ilt_client->start = line;
    line += bnx2x_cid_ilt_lines(sc);

    if (CNIC_SUPPORT(sc)) {
        line += CNIC_ILT_LINES;
    }

    ilt_client->end = (line - 1);

    /* QM */
    if (QM_INIT(sc->qm_cid_count)) {
        ilt_client = &ilt->clients[ILT_CLIENT_QM];
        ilt_client->client_num = ILT_CLIENT_QM;
        ilt_client->page_size = QM_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;

        /* 4 bytes for each cid */
        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
                             QM_ILT_PAGE_SZ);

        ilt_client->end = (line - 1);
    }

    if (CNIC_SUPPORT(sc)) {
        /* SRC */
        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
        ilt_client->client_num = ILT_CLIENT_SRC;
        ilt_client->page_size = SRC_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;
        line += SRC_ILT_LINES;
        ilt_client->end = (line - 1);

        /* TM */
        ilt_client = &ilt->clients[ILT_CLIENT_TM];
        ilt_client->client_num = ILT_CLIENT_TM;
        ilt_client->page_size = TM_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;
        line += TM_ILT_LINES;
        ilt_client->end = (line - 1);
    }

    assert((line <= ILT_MAX_LINES));
}
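/*
 * Illustration with assumed values (not from the original source): if
 * FUNC_ILT_BASE() puts start_line at 0 and bnx2x_cid_ilt_lines() returns 4
 * with CNIC disabled, the CDU client claims the inclusive range
 * [start = 0, end = line - 1 = 3], and the QM client, when enabled, starts
 * at line 4. Each client carves its inclusive [start, end] range out of the
 * running `line` cursor, which is finally checked against ILT_MAX_LINES.
 */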
static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        /* get the Rx buffer size for RX frames */
        sc->fp[i].rx_buf_size =
            (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
    }
}
int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc)
{
    sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE);

    return sc->ilt == NULL;
}

static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc)
{
    sc->ilt->lines = rte_calloc("",
                                sizeof(struct ilt_line), ILT_MAX_LINES,
                                RTE_CACHE_LINE_SIZE);
    return sc->ilt->lines == NULL;
}

void bnx2x_free_ilt_mem(struct bnx2x_softc *sc)
{
    rte_free(sc->ilt);
    sc->ilt = NULL;
}

static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc)
{
    if (sc->ilt->lines != NULL) {
        rte_free(sc->ilt->lines);
        sc->ilt->lines = NULL;
    }
}
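/*
 * Reviewer note (not in the original source): rte_calloc() is declared as
 * rte_calloc(type, num, size, align); the call above passes
 * sizeof(struct ilt_line) as `num` and ILT_MAX_LINES as `size`. The product,
 * and therefore the zeroed allocation, is the same either way, so the swap
 * is harmless.
 */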
static void bnx2x_free_mem(struct bnx2x_softc *sc)
{
    int i;

    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        sc->context[i].vcxt = NULL;
        sc->context[i].size = 0;
    }

    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);

    bnx2x_free_ilt_lines_mem(sc);
}
static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
{
    int context_size;
    int allocated;
    int i;
    char cdu_name[RTE_MEMZONE_NAMESIZE];

    /*
     * Allocate memory for CDU context:
     * This memory is allocated separately and not in the generic ILT
     * functions because CDU differs in few aspects:
     * 1. There can be multiple entities allocating memory for context -
     *    regular L2, CNIC, and SRIOV drivers. Each separately controls
     *    its own ILT lines.
     * 2. Since CDU page-size is not a single 4KB page (which is the case
     *    for the other ILT clients), to be efficient we want to support
     *    allocation of sub-page-size in the last entry.
     * 3. Context pointers are used by the driver to pass to FW / update
     *    the context (for the other ILT clients the pointers are used just
     *    to free the memory during unload).
     */
    context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc));
    for (i = 0, allocated = 0; allocated < context_size; i++) {
        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
                                  (context_size - allocated));

        snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i);
        if (bnx2x_dma_alloc(sc, sc->context[i].size,
                            &sc->context[i].vcxt_dma,
                            cdu_name, BNX2X_PAGE_SIZE) != 0) {
            bnx2x_free_mem(sc);
            return -1;
        }

        sc->context[i].vcxt =
            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;

        allocated += sc->context[i].size;
    }

    bnx2x_alloc_ilt_lines_mem(sc);

    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
        PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
        bnx2x_free_mem(sc);
        return -1;
    }

    return 0;
}
static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc)
{
    bnx2x_dma_free(&sc->fw_stats_dma);
    sc->fw_stats_num = 0;

    sc->fw_stats_req_size = 0;
    sc->fw_stats_req = NULL;
    sc->fw_stats_req_mapping = 0;

    sc->fw_stats_data_size = 0;
    sc->fw_stats_data = NULL;
    sc->fw_stats_data_mapping = 0;
}
static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc)
{
    uint8_t num_queue_stats;
    int num_groups, vf_headroom = 0;

    /* number of queues for statistics is number of eth queues */
    num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc);

    /*
     * Total number of FW statistics requests =
     * 1 for port stats + 1 for PF stats + num of queues
     */
    sc->fw_stats_num = (2 + num_queue_stats);

    /*
     * The request is built from a stats_query_header and an array of
     * stats_query_cmd_group, each of which contains STATS_QUERY_CMD_COUNT
     * rules. The real number of requests is configured in the
     * stats_query_header.
     */
    num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT;
    if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT)
        num_groups++;

    sc->fw_stats_req_size =
        (sizeof(struct stats_query_header) +
         (num_groups * sizeof(struct stats_query_cmd_group)));

    /*
     * Data for statistics requests + stats_counter.
     * stats_counter holds per-STORM counters that are incremented when a
     * STORM has finished with the current request. Memory for FCoE
     * offloaded statistics is counted anyway, even if it will not be sent.
     * VF stats are not accounted for here as the data of VF stats is stored
     * in memory allocated by the VF, not here.
     */
    sc->fw_stats_data_size =
        (sizeof(struct stats_counter) +
         sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) +
         /* sizeof(struct fcoe_statistics_params) + */
         (sizeof(struct per_queue_stats) * num_queue_stats));

    if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
                        &sc->fw_stats_dma, "fw_stats",
                        RTE_CACHE_LINE_SIZE) != 0) {
        bnx2x_free_fw_stats_mem(sc);
        return -1;
    }

    /* set up the shortcuts */

    sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr;
    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;

    sc->fw_stats_data =
        (struct bnx2x_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
                                       sc->fw_stats_req_size);
    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
                                 sc->fw_stats_req_size);

    return 0;
}
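/*
 * Worked example (STATS_QUERY_CMD_COUNT value assumed for illustration):
 * with 4 ethernet queues, fw_stats_num = 2 + 4 = 6. Assuming
 * STATS_QUERY_CMD_COUNT == 16, num_groups = 6 / 16 = 0 with a non-zero
 * remainder, so it is rounded up to 1, and the request buffer holds
 * sizeof(struct stats_query_header) plus one stats_query_cmd_group.
 */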
/*
 * Bits map:
 * 0-7  - Engine0 load counter.
 * 8-15 - Engine1 load counter.
 * 16   - Engine0 RESET_IN_PROGRESS bit.
 * 17   - Engine1 RESET_IN_PROGRESS bit.
 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *        function on the engine.
 * 19   - Engine1 ONE_IS_LOADED.
 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
 *        engine leaders to complete (check for both RESET_IN_PROGRESS bits
 *        and not for just the one belonging to its engine).
 */
#define BNX2X_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
#define BNX2X_PATH0_LOAD_CNT_MASK   0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT  0
#define BNX2X_PATH1_LOAD_CNT_MASK   0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT  8
#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
#define BNX2X_GLOBAL_RESET_BIT      0x00040000
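/*
 * Worked example of the bit layout above (illustrative, not from the
 * original source): a BNX2X_RECOVERY_GLOB_REG value of 0x00050003 decodes as
 *
 *   (0x00050003 & BNX2X_PATH0_LOAD_CNT_MASK) >> BNX2X_PATH0_LOAD_CNT_SHIFT
 *       == 0x03  -> PFs 0 and 1 of engine 0 hold a load mark
 *   (0x00050003 & BNX2X_PATH1_LOAD_CNT_MASK) >> BNX2X_PATH1_LOAD_CNT_SHIFT
 *       == 0x00  -> no engine 1 functions are loaded
 *   bit 16 set -> engine 0 RESET_IN_PROGRESS
 *   bit 18 set -> engine 0 ONE_IS_LOADED
 */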
/* set the GLOBAL_RESET bit, should be run under rtnl lock */
static void bnx2x_set_reset_global(struct bnx2x_softc *sc)
{
    uint32_t val;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
static void bnx2x_clear_reset_global(struct bnx2x_softc *sc)
{
    uint32_t val;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc)
{
    return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT;
}
/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
static void bnx2x_set_reset_done(struct bnx2x_softc *sc)
{
    uint32_t val;
    uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT :
        BNX2X_PATH0_RST_IN_PROG_BIT;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);

    /* Clear the bit */
    val &= ~bit;
    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);

    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc)
{
    uint32_t val;
    uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT :
        BNX2X_PATH0_RST_IN_PROG_BIT;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);

    /* Set the bit */
    val |= bit;
    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);

    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine)
{
    uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
    uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT :
        BNX2X_PATH0_RST_IN_PROG_BIT;

    /* return false if bit is set */
    return (val & bit) ? FALSE : TRUE;
}
/* get the load status for an engine, should be run under rtnl lock */
static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine)
{
    uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK :
        BNX2X_PATH0_LOAD_CNT_MASK;
    uint32_t shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
        BNX2X_PATH0_LOAD_CNT_SHIFT;
    uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);

    val = ((val & mask) >> shift);

    return val != 0;
}
/* set pf load mark */
static void bnx2x_set_pf_load(struct bnx2x_softc *sc)
{
    uint32_t val;
    uint32_t val1;
    uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK :
        BNX2X_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
        BNX2X_PATH0_LOAD_CNT_SHIFT;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    PMD_INIT_FUNC_TRACE(sc);

    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);

    /* get the current counter value */
    val1 = ((val & mask) >> shift);

    /* set bit of this PF */
    val1 |= (1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);

    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/* clear pf load mark */
static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
{
    uint32_t val1, val;
    uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK :
        BNX2X_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
        BNX2X_PATH0_LOAD_CNT_SHIFT;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* clear bit of that PF */
    val1 &= ~(1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val);
    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    return val1 != 0;
}
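/*
 * Illustration (not from the original source): if path-0 PFs 0 and 2 hold
 * load marks (counter bits 0b0101), bnx2x_clear_pf_load() on PF 0 clears
 * bit 0, writes back 0b0100 and returns non-zero, telling the caller in
 * bnx2x_nic_unload() that another PF is still loaded and that the
 * "close the gate" logic must stay armed.
 */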
/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t *load_code)
{
    PMD_INIT_FUNC_TRACE(sc);

    /* init fw_seq */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);

    PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);

    /* set ALWAYS_ALIVE bit in shmem */
    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
    bnx2x_drv_pulse(sc);

    /* load request */
    (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                    DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    /* if the MCP fails to respond we must abort */
    if (!(*load_code)) {
        PMD_DRV_LOG(NOTICE, sc, "MCP response failure!");
        return -1;
    }

    /* if MCP refused then must abort */
    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        PMD_DRV_LOG(NOTICE, sc, "MCP refused load request");
        return -1;
    }

    return 0;
}
/*
 * Check whether another PF has already loaded FW to the chip. In virtualized
 * environments a PF from another VM may have already initialized the device,
 * including loading FW.
 */
static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine? */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword */
        my_fw = (BNX2X_5710_FW_MAJOR_VERSION +
                 (BNX2X_5710_FW_MINOR_VERSION << 8) +
                 (BNX2X_5710_FW_REVISION_VERSION << 16) +
                 (BNX2X_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
        PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x",
                    loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            PMD_DRV_LOG(NOTICE, sc,
                        "FW 0x%08x already loaded (mine is 0x%08x)",
                        loaded_fw, my_fw);
            return -1;
        }
    }

    return 0;
}
/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
{
    uint32_t ncsi_oem_data_addr;

    PMD_INIT_FUNC_TRACE(sc);

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
        /*
         * Barrier here for ordering between the writing to sc->port.pmf
         * here and reading it from the periodic task.
         */
        sc->port.pmf = 1;
        mb();
    } else {
        sc->port.pmf = 0;
    }

    PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf);

    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
            if (ncsi_oem_data_addr) {
                REG_WR(sc,
                       (ncsi_oem_data_addr +
                        offsetof(struct glob_ncsi_oem_data,
                                 driver_version)), 0);
            }
        }
    }
}
static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc)
{
    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
    int abs_func;
    int vn;

    if (BNX2X_NOMCP(sc)) {
        return;    /* what should be the default value in this case */
    }

    /*
     * The formula for computing the absolute function number is...
     * For 2 port configuration (4 functions per port):
     *   abs_func = 2 * vn + SC_PORT + SC_PATH
     * For 4 port configuration (2 functions per port):
     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
     * (see the worked example after this function)
     */
    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
        if (abs_func >= E1H_FUNC_MAX) {
            break;
        }
        sc->devinfo.mf_info.mf_config[vn] =
            MFCFG_RD(sc, func_mf_config[abs_func].config);
    }

    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
        FUNC_MF_CFG_FUNC_DISABLED) {
        PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
        sc->flags |= BNX2X_MF_FUNC_DIS;
    } else {
        PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
        sc->flags &= ~BNX2X_MF_FUNC_DIS;
    }
}
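/*
 * Worked example for the formula above (illustrative): in a 2-port
 * configuration n == 1, so vn == 1 on port 1 of path 0 gives
 * abs_func = 1 * (2 * 1 + 1) + 0 = 3. In a 4-port configuration n == 2,
 * and the same vn/port/path give abs_func = 2 * (2 * 1 + 1) + 0 = 6,
 * matching the expanded form 4 * vn + 2 * SC_PORT + SC_PATH.
 */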
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x_softc *sc)
{
    uint32_t j, val;

    for (j = 0; j < 1000; j++) {
        val = (1UL << 31);
        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
        if (val & (1L << 31))
            break;

        DELAY(5000);
    }

    if (!(val & (1L << 31))) {
        PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register");
        return -1;
    }

    return 0;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x_softc *sc)
{
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}
static void bnx2x_fan_failure(struct bnx2x_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t ext_phy_config;

    /* mark the failure */
    ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
             ext_phy_config);

    /* log the failure */
    PMD_DRV_LOG(INFO, sc,
                "Fan Failure has caused the driver to shutdown "
                "the card to prevent permanent damage. "
                "Please contact OEM Support for assistance");

    rte_panic("Schedule task to handle fan failure");
}
/* this function is called upon a link interrupt */
static void bnx2x_link_attn(struct bnx2x_softc *sc)
{
    uint32_t pause_enabled = 0;
    struct host_port_stats *pstats;
    int cmng_fns;

    /* Make sure that we are synced with the current statistics */
    bnx2x_stats_handle(sc, STATS_EVENT_STOP);

    elink_link_update(&sc->link_params, &sc->link_vars);

    if (sc->link_vars.link_up) {
        /* dropless flow control */
        if (sc->dropless_fc) {
            pause_enabled = 0;

            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
                pause_enabled = 1;
            }

            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
                   pause_enabled);
        }

        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
            pstats = BNX2X_SP(sc, port_stats);
            /* reset old mac stats */
            memset(&(pstats->mac_stx[0]), 0,
                   sizeof(struct mac_stx));
        }

        if (sc->state == BNX2X_STATE_OPEN) {
            bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
        }
    }

    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
        cmng_fns = bnx2x_get_cmng_fns_mode(sc);

        if (cmng_fns != CMNG_FNS_NONE) {
            bnx2x_cmng_fns_init(sc, FALSE, cmng_fns);
            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
        }
    }

    bnx2x_link_report_locked(sc);

    if (IS_MF(sc)) {
        bnx2x_link_sync_notify(sc);
    }
}
static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
{
    int port = SC_PORT(sc);
    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
        MISC_REG_AEU_MASK_ATTN_FUNC_0;
    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
        NIG_REG_MASK_INTERRUPT_PORT0;
    uint32_t aeu_mask;
    uint32_t nig_mask = 0;
    uint32_t reg_addr;
    uint32_t igu_acked;
    uint32_t cnt;

    if (sc->attn_state & asserted) {
        PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted);
    }

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, aeu_addr);

    aeu_mask &= ~(asserted & 0x3ff);

    REG_WR(sc, aeu_addr, aeu_mask);

    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    sc->attn_state |= asserted;

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {
            bnx2x_acquire_phy_lock(sc);
            /* save nig interrupt mask */
            nig_mask = REG_RD(sc, nig_int_mask_addr);

            /* If nig_mask is not set, no need to call the update function */
            if (nig_mask) {
                REG_WR(sc, nig_int_mask_addr, 0);

                bnx2x_link_attn(sc);
            }

            /* handle unicore attn? */
        }

        if (asserted & ATTN_SW_TIMER_4_FUNC) {
            PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!");
        }

        if (asserted & GPIO_2_FUNC) {
            PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!");
        }

        if (asserted & GPIO_3_FUNC) {
            PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!");
        }

        if (asserted & GPIO_4_FUNC) {
            PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!");
        }

        if (port == 0) {
            if (asserted & ATTN_GENERAL_ATTN_1) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_2) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_3) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
            }
        } else {
            if (asserted & ATTN_GENERAL_ATTN_4) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_5) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_6) {
                PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
            }
        }
    }

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr =
            (HC_REG_COMMAND_REG + port * 32 +
             COMMAND_REG_ATTN_BITS_SET);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8);
    }

    PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x",
                asserted,
                (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
                reg_addr);
    REG_WR(sc, reg_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC) {
        /*
         * Verify that IGU ack through BAR was written before restoring
         * NIG mask. This loop should exit after 2-3 iterations max.
         */
        if (sc->devinfo.int_block != INT_BLOCK_HC) {
            cnt = 0;

            do {
                igu_acked =
                    REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0)
                     && (++cnt < MAX_IGU_ATTN_ACK_TO));

            if (!igu_acked) {
                PMD_DRV_LOG(ERR, sc,
                            "Failed to verify IGU ack on time");
            }

            mb();
        }

        REG_WR(sc, nig_int_mask_addr, nig_mask);

        bnx2x_release_phy_lock(sc);
    }
}
static void
bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx,
                       __rte_unused const char *blk)
{
    PMD_DRV_LOG(INFO, sc, "%s%s", idx ? ", " : "", blk);
}
static int
bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num,
                                uint8_t print)
{
    uint32_t cur_bit = 0;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "BRB");
                break;
            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "PARSER");
                break;
            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "TSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "SEARCHER");
                break;
            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "TCM");
                break;
            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "TSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "XPB");
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return par_num;
}

static int
bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num,
                                uint8_t *global, uint8_t print)
{
    uint32_t cur_bit = 0;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "PBF");
                break;
            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "QM");
                break;
            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "TM");
                break;
            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "XSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "XCM");
                break;
            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "XSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "DOORBELLQ");
                break;
            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "NIG");
                break;
            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "VAUX PCI CORE");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "DEBUG");
                break;
            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "USDM");
                break;
            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "UCM");
                break;
            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "USEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "UPB");
                break;
            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "CSDM");
                break;
            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "CCM");
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return par_num;
}

static int
bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num,
                                uint8_t print)
{
    uint32_t cur_bit = 0;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "CSEMI");
                break;
            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "PXP");
                break;
            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++,
                                           "PXPPCICLOCKCLIENT");
                break;
            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "CFC");
                break;
            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "CDU");
                break;
            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "DMAE");
                break;
            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "IGU");
                break;
            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "MISC");
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return par_num;
}

static int
bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num,
                                uint8_t *global, uint8_t print)
{
    uint32_t cur_bit = 0;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "MCP ROM");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "MCP UMP RX");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "MCP UMP TX");
                *global = TRUE;
                break;
            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "MCP SCPAD");
                *global = TRUE;
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return par_num;
}

static int
bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num,
                                uint8_t print)
{
    uint32_t cur_bit = 0;
    int i;

    for (i = 0; sig; i++) {
        cur_bit = ((uint32_t)0x1 << i);
        if (sig & cur_bit) {
            switch (cur_bit) {
            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "PGLUE_B");
                break;
            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
                if (print)
                    bnx2x_print_next_block(sc, par_num++, "ATC");
                break;
            }

            /* Clear the bit */
            sig &= ~cur_bit;
        }
    }

    return par_num;
}

static uint8_t
bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t *global, uint8_t print,
                  uint32_t *sig)
{
    int par_num = 0;

    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
        PMD_DRV_LOG(ERR, sc,
                    "Parity error: HW block parity attention:"
                    "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x",
                    (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
                    (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
                    (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
                    (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
                    (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));

        if (print)
            PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: ");

        par_num =
            bnx2x_check_blocks_with_parity0(sc, sig[0] &
                                            HW_PRTY_ASSERT_SET_0,
                                            par_num, print);
        par_num =
            bnx2x_check_blocks_with_parity1(sc, sig[1] &
                                            HW_PRTY_ASSERT_SET_1,
                                            par_num, global, print);
        par_num =
            bnx2x_check_blocks_with_parity2(sc, sig[2] &
                                            HW_PRTY_ASSERT_SET_2,
                                            par_num, print);
        par_num =
            bnx2x_check_blocks_with_parity3(sc, sig[3] &
                                            HW_PRTY_ASSERT_SET_3,
                                            par_num, global, print);
        par_num =
            bnx2x_check_blocks_with_parity4(sc, sig[4] &
                                            HW_PRTY_ASSERT_SET_4,
                                            par_num, print);

        if (print)
            PMD_DRV_LOG(INFO, sc, "");

        return TRUE;
    }

    return FALSE;
}
static uint8_t
bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t *global, uint8_t print)
{
    struct attn_route attn = { {0} };
    int port = SC_PORT(sc);

    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);

    if (!CHIP_IS_E1x(sc))
        attn.sig[4] =
            REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4);

    return bnx2x_parity_attn(sc, global, print, attn.sig);
}
static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn)
{
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            PMD_DRV_LOG(INFO, sc,
                        "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        PMD_DRV_LOG(INFO, sc,
                    "ERROR: FATAL parity attention set4 0x%08x",
                    (uint32_t)(attn &
                               (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}
static void bnx2x_e1h_disable(struct bnx2x_softc *sc)
{
    int port = SC_PORT(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0);
}

static void bnx2x_e1h_enable(struct bnx2x_softc *sc)
{
    int port = SC_PORT(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
}
/*
 * Called due to an MCP event (on pmf):
 *   reread the new bandwidth configuration
 *   configure FW
 *   notify other functions about the change
 */
static void bnx2x_config_mf_bw(struct bnx2x_softc *sc)
{
    if (sc->link_vars.link_up) {
        bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
        bnx2x_link_sync_notify(sc);
    }

    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
}

static void bnx2x_set_mf_bw(struct bnx2x_softc *sc)
{
    bnx2x_config_mf_bw(sc);
    bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

static void bnx2x_handle_eee_event(struct bnx2x_softc *sc)
{
    bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc)
{
    struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat;

    strncpy(ether_stat->version, BNX2X_DRIVER_VERSION,
            ETH_STAT_INFO_VERSION_LEN);

    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
                                          ether_stat->mac_local + MAC_PAD,
                                          MAC_PAD, ETH_ALEN);

    ether_stat->mtu_size = sc->mtu;

    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
    ether_stat->promiscuous_mode = 0;    // (flags & PROMISC) ? 1 : 0;

    ether_stat->txq_size = sc->tx_ring_size;
    ether_stat->rxq_size = sc->rx_ring_size;
}
static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc)
{
    enum drv_info_opcode op_code;
    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);

    /* if drv_info version supported by MFW doesn't match - send NACK */
    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
        bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
               DRV_INFO_CONTROL_OP_CODE_SHIFT);

    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));

    switch (op_code) {
    case ETH_STATS_OPCODE:
        bnx2x_drv_info_ether_stat(sc);
        break;
    case FCOE_STATS_OPCODE:
    case ISCSI_STATS_OPCODE:
    default:
        /* if op code isn't supported - send NACK */
        bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
        return;
    }

    /*
     * If we got drv_info attn from MFW then these fields are defined in
     * shmem2 for sure
     */
    SHMEM2_WR(sc, drv_info_host_addr_lo,
              U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp)));
    SHMEM2_WR(sc, drv_info_host_addr_hi,
              U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp)));

    bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
}
static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event)
{
    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
        /*
         * This is the only place besides the function initialization
         * where the sc->flags can change so it is done without any
         * locks.
         */
        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
            FUNC_MF_CFG_FUNC_DISABLED) {
            PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
            sc->flags |= BNX2X_MF_FUNC_DIS;
            bnx2x_e1h_disable(sc);
        } else {
            PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
            sc->flags &= ~BNX2X_MF_FUNC_DIS;
            bnx2x_e1h_enable(sc);
        }

        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
    }

    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
        bnx2x_config_mf_bw(sc);
        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
    }

    /* Report results to MCP */
    if (dcc_event)
        bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
    else
        bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
}
static void bnx2x_pmf_update(struct bnx2x_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    sc->port.pmf = 1;

    /*
     * We need the mb() to ensure the ordering between the writing to
     * sc->port.pmf here and reading it from the bnx2x_periodic_task().
     */
    mb();

    /* enable nig attention */
    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val);
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
    }

    bnx2x_stats_handle(sc, STATS_EVENT_PMF);
}
static int bnx2x_mc_assert(struct bnx2x_softc *sc)
{
    char last_idx;
    int i, rc = 0;
    __rte_unused uint32_t row0, row1, row2, row3;

    /* XSTORM */
    last_idx =
        REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        row0 = REG_RD(sc,
                      BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc,
                      BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc,
                      BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc,
                      BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            PMD_DRV_LOG(ERR, sc,
                        "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
                        i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* TSTORM */
    last_idx =
        REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        row0 = REG_RD(sc,
                      BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc,
                      BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc,
                      BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc,
                      BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            PMD_DRV_LOG(ERR, sc,
                        "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
                        i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* CSTORM */
    last_idx =
        REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        row0 = REG_RD(sc,
                      BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc,
                      BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc,
                      BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc,
                      BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            PMD_DRV_LOG(ERR, sc,
                        "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
                        i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    /* USTORM */
    last_idx =
        REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
    if (last_idx)
        PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);

    /* print the asserts */
    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
        row0 = REG_RD(sc,
                      BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
        row1 = REG_RD(sc,
                      BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
        row2 = REG_RD(sc,
                      BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
        row3 = REG_RD(sc,
                      BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);

        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
            PMD_DRV_LOG(ERR, sc,
                        "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
                        i, row3, row2, row1, row0);
            rc++;
        } else {
            break;
        }
    }

    return rc;
}
static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
{
    int func = SC_FUNC(sc);
    uint32_t val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
        if (attn & BNX2X_PMF_LINK_ASSERT(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);
            bnx2x_read_mf_cfg(sc);
            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
                MFCFG_RD(sc,
                         func_mf_config[SC_ABS_FUNC(sc)].config);
            val =
                SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);

            if (val & DRV_STATUS_DCC_EVENT_MASK)
                bnx2x_dcc_event(sc,
                                (val &
                                 DRV_STATUS_DCC_EVENT_MASK));

            if (val & DRV_STATUS_SET_MF_BW)
                bnx2x_set_mf_bw(sc);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bnx2x_handle_drv_info_req(sc);

            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bnx2x_pmf_update(sc);

            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bnx2x_handle_eee_event(sc);

            if (sc->link_vars.periodic_flags &
                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                bnx2x_acquire_phy_lock(sc);
                sc->link_vars.periodic_flags &=
                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
                bnx2x_release_phy_lock(sc);
                if (IS_MF(sc)) {
                    bnx2x_link_sync_notify(sc);
                }
                bnx2x_link_report(sc);
            }

            /*
             * Always call it here: bnx2x_link_report() will
             * prevent the link indication duplication.
             */
            bnx2x_link_status_update(sc);
        } else if (attn & BNX2X_MC_ASSERT_BITS) {
            PMD_DRV_LOG(ERR, sc, "MC assert!");
            bnx2x_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            rte_panic("MC assert!");
        } else if (attn & BNX2X_MCP_ASSERT) {
            PMD_DRV_LOG(ERR, sc, "MCP assert!");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
        } else {
            PMD_DRV_LOG(ERR, sc,
                        "Unknown HW assert! (attn 0x%08x)", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn);
        if (attn & BNX2X_GRC_TIMEOUT) {
            val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val);
        }
        if (attn & BNX2X_GRC_RSV) {
            val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val);
        }
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}
static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val);
        /* CFC error attention */
        if (val & 0x2) {
            PMD_DRV_LOG(ERR, sc, "FATAL error from CFC");
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            PMD_DRV_LOG(ERR, sc, "FATAL error from PXP");
        }

        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val);
        }
    }
#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If only PXP2_EOP_ERROR_BIT is set in
             * STS0 and STS1 - clear it.
             *
             * We probably lose additional attentions between
             * STS0 and STS_CLR0; in this case the user will not
             * be notified about them.
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            PMD_DRV_LOG(ERR, sc,
                        "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * is set then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR");

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear attention from PXP2 block without panic
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        PMD_DRV_LOG(ERR, sc,
                    "FATAL HW block attention set2 0x%x",
                    (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        rte_panic("HW block attention set2");
    }
}
static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
        PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val);
        /* DORQ discard attention */
        if (val & 0x2) {
            PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ");
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(sc, reg_offset, val);

        PMD_DRV_LOG(ERR, sc,
                    "FATAL HW block attention set1 0x%08x",
                    (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
        rte_panic("HW block attention set1");
    }
}
static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
        val = REG_RD(sc, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_offset, val);

        PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention");

        /* Fan failure attention */
        elink_hw_reset_phy(&sc->link_params);
        bnx2x_fan_failure(sc);
    }

    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bnx2x_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bnx2x_release_phy_lock(sc);
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        rte_panic("FATAL HW block attention set0 0x%lx",
                  (attn & HW_INTERRUT_ASSERT_SET_0));
    }
}
static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bnx2x_acquire_alr(sc);

    if (bnx2x_chk_parity_attn(sc, &global, TRUE)) {
        sc->recovery_state = BNX2X_RECOVERY_INIT;

        /* disable HW interrupts */
        bnx2x_int_disable(sc);
        bnx2x_release_alr(sc);
        return;
    }

    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4);
    if (!CHIP_IS_E1x(sc)) {
        attn.sig[4] =
            REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4);
    } else {
        attn.sig[4] = 0;
    }

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &sc->attn_group[index];

            bnx2x_attn_int_deasserted4(sc,
                                       attn.sig[4] & group_mask->sig[4]);
            bnx2x_attn_int_deasserted3(sc,
                                       attn.sig[3] & group_mask->sig[3]);
            bnx2x_attn_int_deasserted1(sc,
                                       attn.sig[1] & group_mask->sig[1]);
            bnx2x_attn_int_deasserted2(sc,
                                       attn.sig[2] & group_mask->sig[2]);
            bnx2x_attn_int_deasserted0(sc,
                                       attn.sig[0] & group_mask->sig[0]);
        }
    }

    bnx2x_release_alr(sc);

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port * 32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8);
    }

    val = ~deasserted;
    PMD_DRV_LOG(DEBUG, sc,
                "about to mask 0x%08x at %s addr 0x%08x", val,
                (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
                reg_addr);
    REG_WR(sc, reg_addr, val);

    if (~sc->attn_state & deasserted) {
        PMD_DRV_LOG(ERR, sc, "IGU error");
    }

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
        MISC_REG_AEU_MASK_ATTN_FUNC_0;

    bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, reg_addr);

    aeu_mask |= (deasserted & 0x3ff);

    REG_WR(sc, reg_addr, aeu_mask);
    bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    sc->attn_state &= ~deasserted;
}
static void bnx2x_attn_int(struct bnx2x_softc *sc)
{
    /* read local copy of bits */
    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
    uint32_t attn_ack =
        le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
    uint32_t attn_state = sc->attn_state;

    /* look for changed bits */
    uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
    uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

    PMD_DRV_LOG(DEBUG, sc,
                "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x",
                attn_bits, attn_ack, asserted, deasserted);

    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
        PMD_DRV_LOG(ERR, sc, "BAD attention state");
    }

    /* handle bits that were raised */
    if (asserted) {
        bnx2x_attn_int_asserted(sc, asserted);
    }

    if (deasserted) {
        bnx2x_attn_int_deasserted(sc, deasserted);
    }
}
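/*
 * Worked example of the changed-bit masks above (illustrative): with
 * attn_bits == 0x5, attn_ack == 0x1 and attn_state == 0x1, asserted is
 * 0x5 & ~0x1 & ~0x1 == 0x4 (newly raised, not yet acked) and deasserted is
 * ~0x5 & 0x1 & 0x1 == 0x0, so only bnx2x_attn_int_asserted() runs.
 */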
static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    uint16_t rc = 0;

    mb();    /* status block is written to by the chip */

    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
        rc |= BNX2X_DEF_SB_ATT_IDX;
    }

    if (sc->def_idx != def_sb->sp_sb.running_index) {
        sc->def_idx = def_sb->sp_sb.running_index;
        rc |= BNX2X_DEF_SB_IDX;
    }

    mb();

    return rc;
}
static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc,
                                                     uint32_t cid)
{
    return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj;
}
static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc)
{
    struct ecore_mcast_ramrod_params rparam;
    int rc;

    memset(&rparam, 0, sizeof(rparam));

    rparam.mcast_obj = &sc->mcast_obj;

    /* clear pending state for the last command */
    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);

    /* if there are pending mcast commands - send them */
    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
        if (rc < 0) {
            PMD_DRV_LOG(INFO, sc,
                        "Failed to send pending mcast commands (%d)",
                        rc);
        }
    }
}
static void
bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
    struct ecore_vlan_mac_obj *vlan_mac_obj;

    /* always push next commands out, don't wait here */
    bnx2x_set_bit(RAMROD_CONT, &ramrod_flags);

    switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST completions");
        bnx2x_handle_mcast_eqe(sc);
        return;

    default:
        PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d",
                    elem->message.data.eth_event.echo);
        return;
    }

    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        PMD_DRV_LOG(NOTICE, sc,
                    "Failed to schedule new commands (%d)", rc);
    } else if (rc > 0) {
        PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands...");
    }
}
static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc)
{
    bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send rx_mode command again if was requested */
    if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) {
        bnx2x_set_storm_rx_mode(sc);
    }
}
static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb();    /* keep prod updates ordered */
}
static void bnx2x_eq_int(struct bnx2x_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0;
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
     * When we get to the next-page slot we need to adjust so the loop
     * condition below will be met. The next element is the size of a
     * regular element, hence incrementing by 1 is enough.
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function may never run in parallel with itself for a
     * specific sc and no need for a read memory barrier here.
     */
    sw_cons = sc->eq_cons;
    sw_prod = sc->eq_prod;

    for (;
         sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &sc->eq[EQ_DESC(sw_cons)];

        /* elem CID originates from FW, actually LE */
        cid = SW_CID(elem->message.data.cfc_del_event.cid);
        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
        case EVENT_RING_OPCODE_STAT_QUERY:
            PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d",
                                   sc->stats_comp++);
            /* nothing to do with stats comp */
            goto next_spqe;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /* we may want to verify here that the sc state is HALTING */
            PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]",
                        cid);
            q_obj = bnx2x_cid_to_q_obj(sc, cid);
            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_START_TRAFFIC:
            PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC");
            if (f_obj->complete_cmd
                (sc, f_obj, ECORE_F_CMD_TX_START)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
            echo = elem->message.data.function_update_event.echo;
            if (echo == SWITCH_UPDATE) {
                PMD_DRV_LOG(DEBUG, sc,
                            "got FUNC_SWITCH_UPDATE ramrod");
                if (f_obj->complete_cmd(sc, f_obj,
                                        ECORE_F_CMD_SWITCH_UPDATE))
                    break;
            } else {
                PMD_DRV_LOG(DEBUG, sc,
                            "AFEX: ramrod completed FUNCTION_UPDATE");
                f_obj->complete_cmd(sc, f_obj,
                                    ECORE_F_CMD_AFEX_UPDATE);
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FORWARD_SETUP:
            q_obj = &bnx2x_fwd_sp_obj(sc, q_obj);
            if (q_obj->complete_cmd(sc, q_obj,
                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_START:
            PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_STOP:
            PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
                break;
            }
            goto next_spqe;
        }

        switch (opcode | sc->state) {
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT):
            cid =
                elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
            PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. CID %d",
                        cid);
            rss_raw->clear_pending(rss_raw);
            break;

        case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
        case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
            PMD_DRV_LOG(DEBUG, sc,
                        "got (un)set mac ramrod");
            bnx2x_handle_classification_eqe(sc, elem);
            break;

        case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
            PMD_DRV_LOG(DEBUG, sc,
                        "got mcast ramrod");
            bnx2x_handle_mcast_eqe(sc);
            break;

        case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
            PMD_DRV_LOG(DEBUG, sc,
                        "got rx_mode ramrod");
            bnx2x_handle_rx_mode_eqe(sc);
            break;

        default:
            /* unknown event log error and continue */
            PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x",
                        elem->message.opcode, sc->state);
        }

next_spqe:
        spqe_cnt++;
    }    /* for */

    mb();
    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);

    sc->eq_cons = sw_cons;
    sc->eq_prod = sw_prod;

    /* make sure that above mem writes were issued towards the memory */
    wmb();

    /* update producer */
    bnx2x_update_eq_prod(sc, sc->eq_prod);
}
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
{
	uint16_t status;
	int rc = 0;

	PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---");

	/* what work needs to be performed? */
	status = bnx2x_update_dsb_idx(sc);

	PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---");
		bnx2x_attn_int(sc);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
		/* handle EQ completions */
		PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---");
		bnx2x_eq_int(sc);
		bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
			     le16toh(sc->def_idx), IGU_INT_NOP, 1);
		status &= ~BNX2X_DEF_SB_IDX;
	}

	/* if status is non zero then something went wrong */
	if (unlikely(status)) {
		PMD_DRV_LOG(INFO, sc,
			    "Got an unknown SP interrupt! (0x%04x)", status);
	}

	/* ack status block only if something was actually handled */
	bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
		     le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);

	return rc;
}
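/*
 * Fast-path task: refresh the fastpath status-block index, poll RX work on
 * this queue while the scan_fp flag is set, and re-enable the IGU interrupt
 * for the queue when done.
 */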
static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
{
	struct bnx2x_softc *sc = fp->sc;
	uint8_t more_rx = FALSE;

	/* Make sure FP is initialized */
	if (!fp->sb_running_index)
		return;

	PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
			       "---> FP TASK QUEUE (%d) <--", fp->index);

	/* update the fastpath index */
	bnx2x_update_fp_sb_idx(fp);

	if (rte_atomic32_read(&sc->scan_fp) == 1) {
		if (bnx2x_has_rx_work(fp)) {
			more_rx = bnx2x_rxeof(sc, fp);
		}

		if (more_rx) {
			/* still more work to do */
			bnx2x_handle_fp_tq(fp);
			return;
		}
	}

	bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
		     le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}

/*
 * Legacy interrupt entry point.
 *
 * Verifies that the controller generated the interrupt and
 * then calls a separate routine to handle the various
 * interrupt causes: link, RX, and TX.
 */
4586 int bnx2x_intr_legacy(struct bnx2x_softc
*sc
)
4588 struct bnx2x_fastpath
*fp
;
4589 uint32_t status
, mask
;
4593 * 0 for ustorm, 1 for cstorm
4594 * the bits returned from ack_int() are 0-15
4595 * bit 0 = attention status block
4596 * bit 1 = fast path status block
4597 * a mask of 0x2 or more = tx/rx event
4598 * a mask of 1 = slow path event
4601 status
= bnx2x_ack_int(sc
);
4603 /* the interrupt is not for us */
4604 if (unlikely(status
== 0)) {
4608 PMD_DEBUG_PERIODIC_LOG(DEBUG
, sc
, "Interrupt status 0x%04x", status
);
4609 //bnx2x_dump_status_block(sc);
4611 FOR_EACH_ETH_QUEUE(sc
, i
) {
4613 mask
= (0x2 << (fp
->index
+ CNIC_SUPPORT(sc
)));
4614 if (status
& mask
) {
4615 /* acknowledge and disable further fastpath interrupts */
4616 bnx2x_ack_sb(sc
, fp
->igu_sb_id
, USTORM_ID
,
4617 0, IGU_INT_DISABLE
, 0);
4618 bnx2x_handle_fp_tq(fp
);
4623 if (unlikely(status
& 0x1)) {
4624 /* acknowledge and disable further slowpath interrupts */
4625 bnx2x_ack_sb(sc
, sc
->igu_dsb_id
, USTORM_ID
,
4626 0, IGU_INT_DISABLE
, 0);
4627 rc
= bnx2x_handle_sp_tq(sc
);
4631 if (unlikely(status
)) {
4632 PMD_DRV_LOG(WARNING
, sc
,
4633 "Unexpected fastpath status (0x%08x)!", status
);
static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc);
static int bnx2x_init_hw_common(struct bnx2x_softc *sc);
static int bnx2x_init_hw_port(struct bnx2x_softc *sc);
static int bnx2x_init_hw_func(struct bnx2x_softc *sc);
static void bnx2x_reset_common(struct bnx2x_softc *sc);
static void bnx2x_reset_port(struct bnx2x_softc *sc);
static void bnx2x_reset_func(struct bnx2x_softc *sc);
static int bnx2x_init_firmware(struct bnx2x_softc *sc);
static void bnx2x_release_firmware(struct bnx2x_softc *sc);
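/*
 * Driver-side callbacks handed to the ecore function state machine; ecore
 * invokes these to run the chip-, port- and function-level HW init/reset
 * phases and to load/release the firmware.
 */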
static struct ecore_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn = bnx2x_init_hw_common,
	.init_hw_port = bnx2x_init_hw_port,
	.init_hw_func = bnx2x_init_hw_func,

	.reset_hw_cmn = bnx2x_reset_common,
	.reset_hw_port = bnx2x_reset_port,
	.reset_hw_func = bnx2x_reset_func,

	.init_fw = bnx2x_init_firmware,
	.release_fw = bnx2x_release_firmware,
};
static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
{
	PMD_INIT_FUNC_TRACE(sc);

	ecore_init_func_obj(sc,
			    &sc->func_obj,
			    BNX2X_SP(sc, func_rdata),
			    (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
			    BNX2X_SP(sc, func_afex_rdata),
			    (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}
static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code)
{
	struct ecore_func_state_params func_params = { NULL };
	int rc;

	PMD_INIT_FUNC_TRACE(sc);

	/* prepare the parameters for function state transitions */
	bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &sc->func_obj;
	func_params.cmd = ECORE_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	/*
	 * Via a plethora of function pointers, we will eventually reach
	 * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func().
	 */
	rc = ecore_func_state_change(sc, &func_params);

	return rc;
}
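/*
 * Fill 'len' bytes at BAR offset 'addr' with the value 'fill', using 32-bit
 * register writes when both address and length are dword aligned and byte
 * writes otherwise.
 */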
static void
bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len)
{
	uint32_t i;

	if (!(len % 4) && !(addr % 4)) {
		for (i = 0; i < len; i += 4) {
			REG_WR(sc, (addr + i), fill);
		}
	} else {
		for (i = 0; i < len; i++) {
			REG_WR8(sc, (addr + i), fill);
		}
	}
}
/* writes FP SP data to FW - data_size in dwords */
static void
bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t *sb_data_p,
		    uint32_t data_size)
{
	uint32_t index;

	for (index = 0; index < data_size; index++) {
		REG_WR(sc,
		       (BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			(sizeof(uint32_t) * index)), *(sb_data_p + index));
	}
}
static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id)
{
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	uint32_t *sb_data_p;
	uint32_t data_size = 0;

	if (!CHIP_IS_E1x(sc)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = FALSE;
		sb_data_p = (uint32_t *)&sb_data_e2;
		data_size = (sizeof(struct hc_status_block_data_e2) /
			     sizeof(uint32_t));
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = FALSE;
		sb_data_p = (uint32_t *)&sb_data_e1x;
		data_size = (sizeof(struct hc_status_block_data_e1x) /
			     sizeof(uint32_t));
	}

	bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(sc,
		   (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
		   0, CSTORM_SYNC_BLOCK_SIZE);
}
static void
bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc,
		    struct hc_sp_status_block_data *sp_sb_data)
{
	uint32_t i;

	for (i = 0;
	     i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
	     i++) {
		REG_WR(sc,
		       (BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
			(i * sizeof(uint32_t))),
		       *((uint32_t *)sp_sb_data + i));
	}
}

static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc)
{
	struct hc_sp_status_block_data sp_sb_data;

	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = FALSE;

	bnx2x_wr_sp_sb_data(sc, &sp_sb_data);

	bnx2x_fill(sc,
		   (BAR_CSTRORM_INTMEM +
		    CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
		   0, CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(sc,
		   (BAR_CSTRORM_INTMEM +
		    CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
		   0, CSTORM_SP_SYNC_BLOCK_SIZE);
}
4806 bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm
*hc_sm
, int igu_sb_id
,
4809 hc_sm
->igu_sb_id
= igu_sb_id
;
4810 hc_sm
->igu_seg_id
= igu_seg_id
;
4811 hc_sm
->timer_value
= 0xFF;
4812 hc_sm
->time_to_expire
= 0xFFFFFFFF;
4815 static void bnx2x_map_sb_state_machines(struct hc_index_data
*index_data
)
4817 /* zero out state machine indices */
4820 index_data
[HC_INDEX_ETH_RX_CQ_CONS
].flags
&= ~HC_INDEX_DATA_SM_ID
;
4823 index_data
[HC_INDEX_OOO_TX_CQ_CONS
].flags
&= ~HC_INDEX_DATA_SM_ID
;
4824 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS0
].flags
&= ~HC_INDEX_DATA_SM_ID
;
4825 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS1
].flags
&= ~HC_INDEX_DATA_SM_ID
;
4826 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS2
].flags
&= ~HC_INDEX_DATA_SM_ID
;
4831 index_data
[HC_INDEX_ETH_RX_CQ_CONS
].flags
|=
4832 (SM_RX_ID
<< HC_INDEX_DATA_SM_ID_SHIFT
);
4835 index_data
[HC_INDEX_OOO_TX_CQ_CONS
].flags
|=
4836 (SM_TX_ID
<< HC_INDEX_DATA_SM_ID_SHIFT
);
4837 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS0
].flags
|=
4838 (SM_TX_ID
<< HC_INDEX_DATA_SM_ID_SHIFT
);
4839 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS1
].flags
|=
4840 (SM_TX_ID
<< HC_INDEX_DATA_SM_ID_SHIFT
);
4841 index_data
[HC_INDEX_ETH_TX_CQ_CONS_COS2
].flags
|=
4842 (SM_TX_ID
<< HC_INDEX_DATA_SM_ID_SHIFT
);
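/*
 * bnx2x_init_sb() programs a fastpath (non-default) status block: it zeroes
 * the old contents, fills the e1x/e2 status-block data with the host DMA
 * address and function/VF ownership, binds the RX and TX state machines to
 * the given IGU SB, and writes the result to CSTORM internal memory.
 */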
4846 bnx2x_init_sb(struct bnx2x_softc
*sc
, rte_iova_t busaddr
, int vfid
,
4847 uint8_t vf_valid
, int fw_sb_id
, int igu_sb_id
)
4849 struct hc_status_block_data_e2 sb_data_e2
;
4850 struct hc_status_block_data_e1x sb_data_e1x
;
4851 struct hc_status_block_sm
*hc_sm_p
;
4852 uint32_t *sb_data_p
;
4856 if (CHIP_INT_MODE_IS_BC(sc
)) {
4857 igu_seg_id
= HC_SEG_ACCESS_NORM
;
4859 igu_seg_id
= IGU_SEG_ACCESS_NORM
;
4862 bnx2x_zero_fp_sb(sc
, fw_sb_id
);
4864 if (!CHIP_IS_E1x(sc
)) {
4865 memset(&sb_data_e2
, 0, sizeof(struct hc_status_block_data_e2
));
4866 sb_data_e2
.common
.state
= SB_ENABLED
;
4867 sb_data_e2
.common
.p_func
.pf_id
= SC_FUNC(sc
);
4868 sb_data_e2
.common
.p_func
.vf_id
= vfid
;
4869 sb_data_e2
.common
.p_func
.vf_valid
= vf_valid
;
4870 sb_data_e2
.common
.p_func
.vnic_id
= SC_VN(sc
);
4871 sb_data_e2
.common
.same_igu_sb_1b
= TRUE
;
4872 sb_data_e2
.common
.host_sb_addr
.hi
= U64_HI(busaddr
);
4873 sb_data_e2
.common
.host_sb_addr
.lo
= U64_LO(busaddr
);
4874 hc_sm_p
= sb_data_e2
.common
.state_machine
;
4875 sb_data_p
= (uint32_t *) & sb_data_e2
;
4876 data_size
= (sizeof(struct hc_status_block_data_e2
) /
4878 bnx2x_map_sb_state_machines(sb_data_e2
.index_data
);
4880 memset(&sb_data_e1x
, 0,
4881 sizeof(struct hc_status_block_data_e1x
));
4882 sb_data_e1x
.common
.state
= SB_ENABLED
;
4883 sb_data_e1x
.common
.p_func
.pf_id
= SC_FUNC(sc
);
4884 sb_data_e1x
.common
.p_func
.vf_id
= 0xff;
4885 sb_data_e1x
.common
.p_func
.vf_valid
= FALSE
;
4886 sb_data_e1x
.common
.p_func
.vnic_id
= SC_VN(sc
);
4887 sb_data_e1x
.common
.same_igu_sb_1b
= TRUE
;
4888 sb_data_e1x
.common
.host_sb_addr
.hi
= U64_HI(busaddr
);
4889 sb_data_e1x
.common
.host_sb_addr
.lo
= U64_LO(busaddr
);
4890 hc_sm_p
= sb_data_e1x
.common
.state_machine
;
4891 sb_data_p
= (uint32_t *) & sb_data_e1x
;
4892 data_size
= (sizeof(struct hc_status_block_data_e1x
) /
4894 bnx2x_map_sb_state_machines(sb_data_e1x
.index_data
);
4897 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_RX_ID
], igu_sb_id
, igu_seg_id
);
4898 bnx2x_setup_ndsb_state_machine(&hc_sm_p
[SM_TX_ID
], igu_sb_id
, igu_seg_id
);
4900 /* write indices to HW - PCI guarantees endianity of regpairs */
4901 bnx2x_wr_fp_sb_data(sc
, fw_sb_id
, sb_data_p
, data_size
);
static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->sc)) {
		return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H;
	} else {
		return fp->cl_id;
	}
}

static uint32_t
bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp)
{
	uint32_t offset = BAR_USTRORM_INTMEM;

	if (IS_VF(sc)) {
		return PXP_VF_ADDR_USDM_QUEUES_START +
		    (sc->acquire_resp.resc.hw_qid[fp->index] *
		     sizeof(struct ustorm_queue_zone_data));
	} else if (!CHIP_IS_E1x(sc)) {
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	} else {
		offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
	}

	return offset;
}
4931 static void bnx2x_init_eth_fp(struct bnx2x_softc
*sc
, int idx
)
4933 struct bnx2x_fastpath
*fp
= &sc
->fp
[idx
];
4934 uint32_t cids
[ECORE_MULTI_TX_COS
] = { 0 };
4935 unsigned long q_type
= 0;
4941 fp
->igu_sb_id
= (sc
->igu_base_sb
+ idx
+ CNIC_SUPPORT(sc
));
4942 fp
->fw_sb_id
= (sc
->base_fw_ndsb
+ idx
+ CNIC_SUPPORT(sc
));
4944 if (CHIP_IS_E1x(sc
))
4945 fp
->cl_id
= SC_L_ID(sc
) + idx
;
4947 /* want client ID same as IGU SB ID for non-E1 */
4948 fp
->cl_id
= fp
->igu_sb_id
;
4949 fp
->cl_qzone_id
= bnx2x_fp_qzone_id(fp
);
4951 /* setup sb indices */
4952 if (!CHIP_IS_E1x(sc
)) {
4953 fp
->sb_index_values
= fp
->status_block
.e2_sb
->sb
.index_values
;
4954 fp
->sb_running_index
= fp
->status_block
.e2_sb
->sb
.running_index
;
4956 fp
->sb_index_values
= fp
->status_block
.e1x_sb
->sb
.index_values
;
4957 fp
->sb_running_index
=
4958 fp
->status_block
.e1x_sb
->sb
.running_index
;
4962 fp
->ustorm_rx_prods_offset
= bnx2x_rx_ustorm_prods_offset(sc
, fp
);
4964 fp
->rx_cq_cons_sb
= &fp
->sb_index_values
[HC_INDEX_ETH_RX_CQ_CONS
];
4966 for (cos
= 0; cos
< sc
->max_cos
; cos
++) {
4969 fp
->tx_cons_sb
= &fp
->sb_index_values
[HC_INDEX_ETH_TX_CQ_CONS_COS0
];
4971 /* nothing more for a VF to do */
4976 bnx2x_init_sb(sc
, fp
->sb_dma
.paddr
, BNX2X_VF_ID_INVALID
, FALSE
,
4977 fp
->fw_sb_id
, fp
->igu_sb_id
);
4979 bnx2x_update_fp_sb_idx(fp
);
4981 /* Configure Queue State object */
4982 bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX
, &q_type
);
4983 bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX
, &q_type
);
4985 ecore_init_queue_obj(sc
,
4986 &sc
->sp_objs
[idx
].q_obj
,
4991 BNX2X_SP(sc
, q_rdata
),
4992 (rte_iova_t
)BNX2X_SP_MAPPING(sc
, q_rdata
),
4995 /* configure classification DBs */
4996 ecore_init_mac_obj(sc
,
4997 &sc
->sp_objs
[idx
].mac_obj
,
5001 BNX2X_SP(sc
, mac_rdata
),
5002 (rte_iova_t
)BNX2X_SP_MAPPING(sc
, mac_rdata
),
5003 ECORE_FILTER_MAC_PENDING
, &sc
->sp_state
,
5004 ECORE_OBJ_TYPE_RX_TX
, &sc
->macs_pool
);
static void
bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		     uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;
	uint32_t i;

	/* update producers */
	rx_prods.prod.bd_prod = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;
	rx_prods.prod.reserved = 0;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
		REG_WR(sc,
		       (fp->ustorm_rx_prods_offset + (i * 4)),
		       rx_prods.raw_data[i]);
	}

	wmb();			/* keep prod updates ordered */
}
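/*
 * Reset the software BD/CQE indices of every RX queue and publish the
 * initial producers to the firmware, which activates the BD rings.
 */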
static void bnx2x_init_rx_rings(struct bnx2x_softc *sc)
{
	struct bnx2x_fastpath *fp;
	int i;
	struct bnx2x_rx_queue *rxq;

	for (i = 0; i < sc->num_queues; i++) {
		fp = &sc->fp[i];
		rxq = sc->rx_queues[fp->index];
		if (!rxq) {
			PMD_RX_LOG(ERR, "RX queue is NULL");
			return;
		}

		rxq->rx_bd_head = 0;
		rxq->rx_bd_tail = rxq->nb_rx_desc;
		rxq->rx_cq_head = 0;
		rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
		*fp->rx_cq_cons_sb = 0;

		/*
		 * Activate the BD ring...
		 * Warning, this will generate an interrupt (to the TSTORM)
		 * so this can only be done after the chip is initialized
		 */
		bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail);
	}
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
{
	struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index];

	fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT;
	fp->tx_db.data.zero_fill1 = 0;
	fp->tx_db.data.prod = 0;

	if (!txq) {
		PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
		return;
	}

	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
}

static void bnx2x_init_tx_rings(struct bnx2x_softc *sc)
{
	int i;

	for (i = 0; i < sc->num_queues; i++) {
		bnx2x_init_tx_ring_one(&sc->fp[i]);
	}
}
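/*
 * bnx2x_init_def_sb() sets up the default (slow-path) status block: the
 * attention group signals are read from the AEU enable registers, the
 * attention and slow-path sections get their host DMA addresses programmed,
 * and the default SB is acked to enable interrupts on it.
 */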
5099 static void bnx2x_init_def_sb(struct bnx2x_softc
*sc
)
5101 struct host_sp_status_block
*def_sb
= sc
->def_sb
;
5102 rte_iova_t mapping
= sc
->def_sb_dma
.paddr
;
5103 int igu_sp_sb_index
;
5105 int port
= SC_PORT(sc
);
5106 int func
= SC_FUNC(sc
);
5107 int reg_offset
, reg_offset_en5
;
5110 struct hc_sp_status_block_data sp_sb_data
;
5112 memset(&sp_sb_data
, 0, sizeof(struct hc_sp_status_block_data
));
5114 if (CHIP_INT_MODE_IS_BC(sc
)) {
5115 igu_sp_sb_index
= DEF_SB_IGU_ID
;
5116 igu_seg_id
= HC_SEG_ACCESS_DEF
;
5118 igu_sp_sb_index
= sc
->igu_dsb_id
;
5119 igu_seg_id
= IGU_SEG_ACCESS_DEF
;
5123 section
= ((uint64_t) mapping
+
5124 offsetof(struct host_sp_status_block
, atten_status_block
));
5125 def_sb
->atten_status_block
.status_block_id
= igu_sp_sb_index
;
5128 reg_offset
= (port
) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
5129 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
;
5131 reg_offset_en5
= (port
) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0
:
5132 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0
;
5134 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
5135 /* take care of sig[0]..sig[4] */
5136 for (sindex
= 0; sindex
< 4; sindex
++) {
5137 sc
->attn_group
[index
].sig
[sindex
] =
5139 (reg_offset
+ (sindex
* 0x4) +
5143 if (!CHIP_IS_E1x(sc
)) {
5145 * enable5 is separate from the rest of the registers,
5146 * and the address skip is 4 and not 16 between the
5149 sc
->attn_group
[index
].sig
[4] =
5150 REG_RD(sc
, (reg_offset_en5
+ (0x4 * index
)));
5152 sc
->attn_group
[index
].sig
[4] = 0;
5156 if (sc
->devinfo
.int_block
== INT_BLOCK_HC
) {
5158 port
? HC_REG_ATTN_MSG1_ADDR_L
: HC_REG_ATTN_MSG0_ADDR_L
;
5159 REG_WR(sc
, reg_offset
, U64_LO(section
));
5160 REG_WR(sc
, (reg_offset
+ 4), U64_HI(section
));
5161 } else if (!CHIP_IS_E1x(sc
)) {
5162 REG_WR(sc
, IGU_REG_ATTN_MSG_ADDR_L
, U64_LO(section
));
5163 REG_WR(sc
, IGU_REG_ATTN_MSG_ADDR_H
, U64_HI(section
));
5166 section
= ((uint64_t) mapping
+
5167 offsetof(struct host_sp_status_block
, sp_sb
));
5169 bnx2x_zero_sp_sb(sc
);
5171 /* PCI guarantees endianity of regpair */
5172 sp_sb_data
.state
= SB_ENABLED
;
5173 sp_sb_data
.host_sb_addr
.lo
= U64_LO(section
);
5174 sp_sb_data
.host_sb_addr
.hi
= U64_HI(section
);
5175 sp_sb_data
.igu_sb_id
= igu_sp_sb_index
;
5176 sp_sb_data
.igu_seg_id
= igu_seg_id
;
5177 sp_sb_data
.p_func
.pf_id
= func
;
5178 sp_sb_data
.p_func
.vnic_id
= SC_VN(sc
);
5179 sp_sb_data
.p_func
.vf_id
= 0xff;
5181 bnx2x_wr_sp_sb_data(sc
, &sp_sb_data
);
5183 bnx2x_ack_sb(sc
, sc
->igu_dsb_id
, USTORM_ID
, 0, IGU_INT_ENABLE
, 0);
static void bnx2x_init_sp_ring(struct bnx2x_softc *sc)
{
	atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
	sc->spq_prod_idx = 0;
	sc->dsb_sp_prod =
	    &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
	sc->spq_prod_bd = sc->spq;
	sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
}
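/*
 * Link the event-queue pages into a ring via their next-page pointers, then
 * initialize the producer/consumer bookkeeping and the available-slot
 * counter (eq_spq_left).
 */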
5196 static void bnx2x_init_eq_ring(struct bnx2x_softc
*sc
)
5198 union event_ring_elem
*elem
;
5201 for (i
= 1; i
<= NUM_EQ_PAGES
; i
++) {
5202 elem
= &sc
->eq
[EQ_DESC_CNT_PAGE
* i
- 1];
5204 elem
->next_page
.addr
.hi
= htole32(U64_HI(sc
->eq_dma
.paddr
+
5206 (i
% NUM_EQ_PAGES
)));
5207 elem
->next_page
.addr
.lo
= htole32(U64_LO(sc
->eq_dma
.paddr
+
5209 (i
% NUM_EQ_PAGES
)));
5213 sc
->eq_prod
= NUM_EQ_DESC
;
5214 sc
->eq_cons_sb
= &sc
->def_sb
->sp_sb
.index_values
[HC_SP_INDEX_EQ_CONS
];
5216 atomic_store_rel_long(&sc
->eq_spq_left
,
5217 (min((MAX_SP_DESC_CNT
- MAX_SPQ_PENDING
),
5221 static void bnx2x_init_internal_common(struct bnx2x_softc
*sc
)
5227 * In switch independent mode, the TSTORM needs to accept
5228 * packets that failed classification, since approximate match
5229 * mac addresses aren't written to NIG LLH.
5232 (BAR_TSTRORM_INTMEM
+
5233 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
), 2);
5236 (BAR_TSTRORM_INTMEM
+
5237 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
), 0);
5240 * Zero this manually as its initialization is currently missing
5243 for (i
= 0; i
< (USTORM_AGG_DATA_SIZE
>> 2); i
++) {
5245 (BAR_USTRORM_INTMEM
+ USTORM_AGG_DATA_OFFSET
+ (i
* 4)),
5249 if (!CHIP_IS_E1x(sc
)) {
5250 REG_WR8(sc
, (BAR_CSTRORM_INTMEM
+ CSTORM_IGU_MODE_OFFSET
),
5251 CHIP_INT_MODE_IS_BC(sc
) ? HC_IGU_BC_MODE
:
static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(sc);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is initialized inside bnx2x_pf_init */
		break;

	default:
		PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP",
			    load_code);
		break;
	}
}
5280 storm_memset_func_cfg(struct bnx2x_softc
*sc
,
5281 struct tstorm_eth_function_common_config
*tcfg
,
5287 addr
= (BAR_TSTRORM_INTMEM
+
5288 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid
));
5289 size
= sizeof(struct tstorm_eth_function_common_config
);
5290 ecore_storm_memset_struct(sc
, addr
, size
, (uint32_t *) tcfg
);
5293 static void bnx2x_func_init(struct bnx2x_softc
*sc
, struct bnx2x_func_init_params
*p
)
5295 struct tstorm_eth_function_common_config tcfg
= { 0 };
5297 if (CHIP_IS_E1x(sc
)) {
5298 storm_memset_func_cfg(sc
, &tcfg
, p
->func_id
);
5301 /* Enable the function in the FW */
5302 storm_memset_vf_to_pf(sc
, p
->func_id
, p
->pf_id
);
5303 storm_memset_func_en(sc
, p
->func_id
, 1);
5306 if (p
->func_flgs
& FUNC_FLG_SPQ
) {
5307 storm_memset_spq_addr(sc
, p
->spq_map
, p
->func_id
);
5309 (XSEM_REG_FAST_MEMORY
+
5310 XSTORM_SPQ_PROD_OFFSET(p
->func_id
)), p
->spq_prod
);
5315 * Calculates the sum of vn_min_rates.
5316 * It's needed for further normalizing of the min_rates.
5318 * sum of vn_min_rates.
5320 * 0 - if all the min_rates are 0.
5321 * In the later case fainess algorithm should be deactivated.
5322 * If all min rates are not zero then those that are zeroes will be set to 1.
5324 static void bnx2x_calc_vn_min(struct bnx2x_softc
*sc
, struct cmng_init_input
*input
)
5327 uint32_t vn_min_rate
;
5331 for (vn
= VN_0
; vn
< SC_MAX_VN_NUM(sc
); vn
++) {
5332 vn_cfg
= sc
->devinfo
.mf_info
.mf_config
[vn
];
5333 vn_min_rate
= (((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
5334 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100);
5336 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
5337 /* skip hidden VNs */
5339 } else if (!vn_min_rate
) {
5340 /* If min rate is zero - set it to 100 */
5341 vn_min_rate
= DEF_MIN_RATE
;
5346 input
->vnic_min_rate
[vn
] = vn_min_rate
;
5349 /* if ETS or all min rates are zeros - disable fairness */
5351 input
->flags
.cmng_enables
&= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
5353 input
->flags
.cmng_enables
|= CMNG_FLAGS_PER_PORT_FAIRNESS_VN
;
static uint16_t
bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg)
{
	uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			    FUNC_MF_CFG_MAX_BW_SHIFT);

	if (!max_cfg) {
		PMD_DRV_LOG(DEBUG, sc,
			    "Max BW configured to 0 - using 100 instead");
		max_cfg = 100;
	}

	return max_cfg;
}
5373 bnx2x_calc_vn_max(struct bnx2x_softc
*sc
, int vn
, struct cmng_init_input
*input
)
5375 uint16_t vn_max_rate
;
5376 uint32_t vn_cfg
= sc
->devinfo
.mf_info
.mf_config
[vn
];
5379 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
5382 max_cfg
= bnx2x_extract_max_cfg(sc
, vn_cfg
);
5385 /* max_cfg in percents of linkspeed */
5387 ((sc
->link_vars
.line_speed
* max_cfg
) / 100);
5388 } else { /* SD modes */
5389 /* max_cfg is absolute in 100Mb units */
5390 vn_max_rate
= (max_cfg
* 100);
5394 input
->vnic_max_rate
[vn
] = vn_max_rate
;
5398 bnx2x_cmng_fns_init(struct bnx2x_softc
*sc
, uint8_t read_cfg
, uint8_t cmng_type
)
5400 struct cmng_init_input input
;
5403 memset(&input
, 0, sizeof(struct cmng_init_input
));
5405 input
.port_rate
= sc
->link_vars
.line_speed
;
5407 if (cmng_type
== CMNG_FNS_MINMAX
) {
5408 /* read mf conf from shmem */
5410 bnx2x_read_mf_cfg(sc
);
5413 /* get VN min rate and enable fairness if not 0 */
5414 bnx2x_calc_vn_min(sc
, &input
);
5416 /* get VN max rate */
5418 for (vn
= VN_0
; vn
< SC_MAX_VN_NUM(sc
); vn
++) {
5419 bnx2x_calc_vn_max(sc
, vn
, &input
);
5423 /* always enable rate shaping and fairness */
5424 input
.flags
.cmng_enables
|= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN
;
5426 ecore_init_cmng(&input
, &sc
->cmng
);
5431 static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc
*sc
)
5433 if (CHIP_REV_IS_SLOW(sc
)) {
5434 return CMNG_FNS_NONE
;
5438 return CMNG_FNS_MINMAX
;
5441 return CMNG_FNS_NONE
;
5445 storm_memset_cmng(struct bnx2x_softc
*sc
, struct cmng_init
*cmng
, uint8_t port
)
5452 addr
= (BAR_XSTRORM_INTMEM
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
));
5453 size
= sizeof(struct cmng_struct_per_port
);
5454 ecore_storm_memset_struct(sc
, addr
, size
, (uint32_t *) & cmng
->port
);
5456 for (vn
= VN_0
; vn
< SC_MAX_VN_NUM(sc
); vn
++) {
5457 func
= func_by_vn(sc
, vn
);
5459 addr
= (BAR_XSTRORM_INTMEM
+
5460 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
));
5461 size
= sizeof(struct rate_shaping_vars_per_vn
);
5462 ecore_storm_memset_struct(sc
, addr
, size
,
5463 (uint32_t *) & cmng
->
5464 vnic
.vnic_max_rate
[vn
]);
5466 addr
= (BAR_XSTRORM_INTMEM
+
5467 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
));
5468 size
= sizeof(struct fairness_vars_per_vn
);
5469 ecore_storm_memset_struct(sc
, addr
, size
,
5470 (uint32_t *) & cmng
->
5471 vnic
.vnic_min_rate
[vn
]);
5475 static void bnx2x_pf_init(struct bnx2x_softc
*sc
)
5477 struct bnx2x_func_init_params func_init
;
5478 struct event_ring_data eq_data
;
5481 memset(&eq_data
, 0, sizeof(struct event_ring_data
));
5482 memset(&func_init
, 0, sizeof(struct bnx2x_func_init_params
));
5484 if (!CHIP_IS_E1x(sc
)) {
5485 /* reset IGU PF statistics: MSIX + ATTN */
5488 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
5489 (BNX2X_IGU_STAS_MSG_VF_CNT
* 4) +
5490 ((CHIP_IS_MODE_4_PORT(sc
) ? SC_FUNC(sc
) : SC_VN(sc
)) *
5494 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT
+
5495 (BNX2X_IGU_STAS_MSG_VF_CNT
* 4) +
5496 (BNX2X_IGU_STAS_MSG_PF_CNT
* 4) +
5497 ((CHIP_IS_MODE_4_PORT(sc
) ? SC_FUNC(sc
) : SC_VN(sc
)) *
5501 /* function setup flags */
5502 flags
= (FUNC_FLG_STATS
| FUNC_FLG_LEADING
| FUNC_FLG_SPQ
);
5504 func_init
.func_flgs
= flags
;
5505 func_init
.pf_id
= SC_FUNC(sc
);
5506 func_init
.func_id
= SC_FUNC(sc
);
5507 func_init
.spq_map
= sc
->spq_dma
.paddr
;
5508 func_init
.spq_prod
= sc
->spq_prod_idx
;
5510 bnx2x_func_init(sc
, &func_init
);
5512 memset(&sc
->cmng
, 0, sizeof(struct cmng_struct_per_port
));
5515 * Congestion management values depend on the link rate.
5516 * There is no active link so initial link rate is set to 10Gbps.
5517 * When the link comes up the congestion management values are
5518 * re-calculated according to the actual link rate.
5520 sc
->link_vars
.line_speed
= SPEED_10000
;
5521 bnx2x_cmng_fns_init(sc
, TRUE
, bnx2x_get_cmng_fns_mode(sc
));
5523 /* Only the PMF sets the HW */
5525 storm_memset_cmng(sc
, &sc
->cmng
, SC_PORT(sc
));
5528 /* init Event Queue - PCI bus guarantees correct endainity */
5529 eq_data
.base_addr
.hi
= U64_HI(sc
->eq_dma
.paddr
);
5530 eq_data
.base_addr
.lo
= U64_LO(sc
->eq_dma
.paddr
);
5531 eq_data
.producer
= sc
->eq_prod
;
5532 eq_data
.index_id
= HC_SP_INDEX_EQ_CONS
;
5533 eq_data
.sb_id
= DEF_SB_ID
;
5534 storm_memset_eq_data(sc
, &eq_data
, SC_FUNC(sc
));
5537 static void bnx2x_hc_int_enable(struct bnx2x_softc
*sc
)
5539 int port
= SC_PORT(sc
);
5540 uint32_t addr
= (port
) ? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
5541 uint32_t val
= REG_RD(sc
, addr
);
5542 uint8_t msix
= (sc
->interrupt_mode
== INTR_MODE_MSIX
)
5543 || (sc
->interrupt_mode
== INTR_MODE_SINGLE_MSIX
);
5544 uint8_t single_msix
= (sc
->interrupt_mode
== INTR_MODE_SINGLE_MSIX
);
5545 uint8_t msi
= (sc
->interrupt_mode
== INTR_MODE_MSI
);
5548 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
5549 HC_CONFIG_0_REG_INT_LINE_EN_0
);
5550 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
5551 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
5553 val
|= HC_CONFIG_0_REG_SINGLE_ISR_EN_0
;
5556 val
&= ~HC_CONFIG_0_REG_INT_LINE_EN_0
;
5557 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
5558 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
5559 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
5561 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
5562 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
5563 HC_CONFIG_0_REG_INT_LINE_EN_0
|
5564 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
5566 REG_WR(sc
, addr
, val
);
5568 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
5571 REG_WR(sc
, addr
, val
);
5573 /* ensure that HC_CONFIG is written before leading/trailing edge config */
5576 /* init leading/trailing edge */
5578 val
= (0xee0f | (1 << (SC_VN(sc
) + 4)));
5580 /* enable nig and gpio3 attention */
5587 REG_WR(sc
, (HC_REG_TRAILING_EDGE_0
+ port
* 8), val
);
5588 REG_WR(sc
, (HC_REG_LEADING_EDGE_0
+ port
* 8), val
);
5590 /* make sure that interrupts are indeed enabled from here on */
5594 static void bnx2x_igu_int_enable(struct bnx2x_softc
*sc
)
5597 uint8_t msix
= (sc
->interrupt_mode
== INTR_MODE_MSIX
)
5598 || (sc
->interrupt_mode
== INTR_MODE_SINGLE_MSIX
);
5599 uint8_t single_msix
= (sc
->interrupt_mode
== INTR_MODE_SINGLE_MSIX
);
5600 uint8_t msi
= (sc
->interrupt_mode
== INTR_MODE_MSI
);
5602 val
= REG_RD(sc
, IGU_REG_PF_CONFIGURATION
);
5605 val
&= ~(IGU_PF_CONF_INT_LINE_EN
| IGU_PF_CONF_SINGLE_ISR_EN
);
5606 val
|= (IGU_PF_CONF_MSI_MSIX_EN
| IGU_PF_CONF_ATTN_BIT_EN
);
5608 val
|= IGU_PF_CONF_SINGLE_ISR_EN
;
5611 val
&= ~IGU_PF_CONF_INT_LINE_EN
;
5612 val
|= (IGU_PF_CONF_MSI_MSIX_EN
|
5613 IGU_PF_CONF_ATTN_BIT_EN
| IGU_PF_CONF_SINGLE_ISR_EN
);
5615 val
&= ~IGU_PF_CONF_MSI_MSIX_EN
;
5616 val
|= (IGU_PF_CONF_INT_LINE_EN
|
5617 IGU_PF_CONF_ATTN_BIT_EN
| IGU_PF_CONF_SINGLE_ISR_EN
);
5620 /* clean previous status - need to configure igu prior to ack */
5621 if ((!msix
) || single_msix
) {
5622 REG_WR(sc
, IGU_REG_PF_CONFIGURATION
, val
);
5626 val
|= IGU_PF_CONF_FUNC_EN
;
5628 PMD_DRV_LOG(DEBUG
, sc
, "write 0x%x to IGU mode %s",
5629 val
, ((msix
) ? "MSI-X" : ((msi
) ? "MSI" : "INTx")));
5631 REG_WR(sc
, IGU_REG_PF_CONFIGURATION
, val
);
5635 /* init leading/trailing edge */
5637 val
= (0xee0f | (1 << (SC_VN(sc
) + 4)));
5639 /* enable nig and gpio3 attention */
5646 REG_WR(sc
, IGU_REG_TRAILING_EDGE_LATCH
, val
);
5647 REG_WR(sc
, IGU_REG_LEADING_EDGE_LATCH
, val
);
5649 /* make sure that interrupts are indeed enabled from here on */
static void bnx2x_int_enable(struct bnx2x_softc *sc)
{
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		bnx2x_hc_int_enable(sc);
	} else {
		bnx2x_igu_int_enable(sc);
	}
}
*sc
)
5664 int port
= SC_PORT(sc
);
5665 uint32_t addr
= (port
) ? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
5666 uint32_t val
= REG_RD(sc
, addr
);
5668 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
5669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
5670 HC_CONFIG_0_REG_INT_LINE_EN_0
| HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
5671 /* flush all outstanding writes */
5674 REG_WR(sc
, addr
, val
);
5675 if (REG_RD(sc
, addr
) != val
) {
5676 PMD_DRV_LOG(ERR
, sc
, "proper val not read from HC IGU!");
static void bnx2x_igu_int_disable(struct bnx2x_softc *sc)
{
	uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN);

	PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val);

	/* flush all outstanding writes */
	mb();

	REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
		PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!");
	}
}
static void bnx2x_int_disable(struct bnx2x_softc *sc)
{
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		bnx2x_hc_int_disable(sc);
	} else {
		bnx2x_igu_int_disable(sc);
	}
}
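/*
 * Top-level software/firmware init performed during nic_load: per-queue
 * fastpath setup, RX/TX rings, the default status block, slow-path and event
 * queue rings, internal storm memory, statistics, and finally interrupt
 * enablement plus a check for a latched SPIO5 attention.
 */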
5707 static void bnx2x_nic_init(struct bnx2x_softc
*sc
, int load_code
)
5711 PMD_INIT_FUNC_TRACE(sc
);
5713 for (i
= 0; i
< sc
->num_queues
; i
++) {
5714 bnx2x_init_eth_fp(sc
, i
);
5717 rmb(); /* ensure status block indices were read */
5719 bnx2x_init_rx_rings(sc
);
5720 bnx2x_init_tx_rings(sc
);
5723 bnx2x_memset_stats(sc
);
5727 /* initialize MOD_ABS interrupts */
5728 elink_init_mod_abs_int(sc
, &sc
->link_vars
,
5729 sc
->devinfo
.chip_id
,
5730 sc
->devinfo
.shmem_base
,
5731 sc
->devinfo
.shmem2_base
, SC_PORT(sc
));
5733 bnx2x_init_def_sb(sc
);
5734 bnx2x_update_dsb_idx(sc
);
5735 bnx2x_init_sp_ring(sc
);
5736 bnx2x_init_eq_ring(sc
);
5737 bnx2x_init_internal(sc
, load_code
);
5739 bnx2x_stats_init(sc
);
5741 /* flush all before enabling interrupts */
5744 bnx2x_int_enable(sc
);
5746 /* check for SPIO5 */
5747 bnx2x_attn_int_deasserted0(sc
,
5749 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+
5751 AEU_INPUTS_ATTN_BITS_SPIO5
);
5754 static void bnx2x_init_objs(struct bnx2x_softc
*sc
)
5756 /* mcast rules must be added to tx if tx switching is enabled */
5757 ecore_obj_type o_type
;
5758 if (sc
->flags
& BNX2X_TX_SWITCHING
)
5759 o_type
= ECORE_OBJ_TYPE_RX_TX
;
5761 o_type
= ECORE_OBJ_TYPE_RX
;
5763 /* RX_MODE controlling object */
5764 ecore_init_rx_mode_obj(sc
, &sc
->rx_mode_obj
);
5766 /* multicast configuration controlling object */
5767 ecore_init_mcast_obj(sc
,
5773 BNX2X_SP(sc
, mcast_rdata
),
5774 (rte_iova_t
)BNX2X_SP_MAPPING(sc
, mcast_rdata
),
5775 ECORE_FILTER_MCAST_PENDING
,
5776 &sc
->sp_state
, o_type
);
5778 /* Setup CAM credit pools */
5779 ecore_init_mac_credit_pool(sc
,
5782 CHIP_IS_E1x(sc
) ? VNICS_PER_PORT(sc
) :
5783 VNICS_PER_PATH(sc
));
5785 ecore_init_vlan_credit_pool(sc
,
5787 SC_ABS_FUNC(sc
) >> 1,
5788 CHIP_IS_E1x(sc
) ? VNICS_PER_PORT(sc
) :
5789 VNICS_PER_PATH(sc
));
5791 /* RSS configuration object */
5792 ecore_init_rss_config_obj(&sc
->rss_conf_obj
,
5797 BNX2X_SP(sc
, rss_rdata
),
5798 (rte_iova_t
)BNX2X_SP_MAPPING(sc
, rss_rdata
),
5799 ECORE_FILTER_RSS_CONF_PENDING
,
5800 &sc
->sp_state
, ECORE_OBJ_TYPE_RX
);
5804 * Initialize the function. This must be called before sending CLIENT_SETUP
5805 * for the first client.
5807 static int bnx2x_func_start(struct bnx2x_softc
*sc
)
5809 struct ecore_func_state_params func_params
= { NULL
};
5810 struct ecore_func_start_params
*start_params
=
5811 &func_params
.params
.start
;
5813 /* Prepare parameters for function state transitions */
5814 bnx2x_set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
5816 func_params
.f_obj
= &sc
->func_obj
;
5817 func_params
.cmd
= ECORE_F_CMD_START
;
5819 /* Function parameters */
5820 start_params
->mf_mode
= sc
->devinfo
.mf_info
.mf_mode
;
5821 start_params
->sd_vlan_tag
= OVLAN(sc
);
5823 if (CHIP_IS_E2(sc
) || CHIP_IS_E3(sc
)) {
5824 start_params
->network_cos_mode
= STATIC_COS
;
5825 } else { /* CHIP_IS_E1X */
5826 start_params
->network_cos_mode
= FW_WRR
;
5829 start_params
->gre_tunnel_mode
= 0;
5830 start_params
->gre_tunnel_rss
= 0;
5832 return ecore_func_state_change(sc
, &func_params
);
5835 static int bnx2x_set_power_state(struct bnx2x_softc
*sc
, uint8_t state
)
5839 /* If there is no power capability, silently succeed */
5840 if (!(sc
->devinfo
.pcie_cap_flags
& BNX2X_PM_CAPABLE_FLAG
)) {
5841 PMD_DRV_LOG(INFO
, sc
, "No power capability");
5845 pci_read(sc
, (sc
->devinfo
.pcie_pm_cap_reg
+ PCIR_POWER_STATUS
), &pmcsr
,
5851 (sc
->devinfo
.pcie_pm_cap_reg
+
5853 ((pmcsr
& ~PCIM_PSTAT_DMASK
) | PCIM_PSTAT_PME
));
5855 if (pmcsr
& PCIM_PSTAT_DMASK
) {
5856 /* delay required during transition out of D3hot */
5863 /* don't shut down the power for emulation and FPGA */
5864 if (CHIP_REV_IS_SLOW(sc
)) {
5868 pmcsr
&= ~PCIM_PSTAT_DMASK
;
5869 pmcsr
|= PCIM_PSTAT_D3
;
5872 pmcsr
|= PCIM_PSTAT_PMEENABLE
;
5876 (sc
->devinfo
.pcie_pm_cap_reg
+
5877 PCIR_POWER_STATUS
), pmcsr
);
5880 * No more memory access after this point until device is brought back
5886 PMD_DRV_LOG(NOTICE
, sc
, "Can't support PCI power state = %d",
5894 /* return true if succeeded to acquire the lock */
5895 static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc
*sc
, uint32_t resource
)
5897 uint32_t lock_status
;
5898 uint32_t resource_bit
= (1 << resource
);
5899 int func
= SC_FUNC(sc
);
5900 uint32_t hw_lock_control_reg
;
5902 /* Validating that the resource is within range */
5903 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
5904 PMD_DRV_LOG(INFO
, sc
,
5905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)",
5906 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
5911 hw_lock_control_reg
= (MISC_REG_DRIVER_CONTROL_1
+ func
* 8);
5913 hw_lock_control_reg
=
5914 (MISC_REG_DRIVER_CONTROL_7
+ (func
- 6) * 8);
5917 /* try to acquire the lock */
5918 REG_WR(sc
, hw_lock_control_reg
+ 4, resource_bit
);
5919 lock_status
= REG_RD(sc
, hw_lock_control_reg
);
5920 if (lock_status
& resource_bit
) {
5924 PMD_DRV_LOG(NOTICE
, sc
, "Failed to get a resource lock 0x%x", resource
);
5930 * Get the recovery leader resource id according to the engine this function
5931 * belongs to. Currently only only 2 engines is supported.
5933 static int bnx2x_get_leader_lock_resource(struct bnx2x_softc
*sc
)
5936 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1
;
5938 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0
;
5942 /* try to acquire a leader lock for current engine */
5943 static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc
*sc
)
5945 return bnx2x_trylock_hw_lock(sc
, bnx2x_get_leader_lock_resource(sc
));
5948 static int bnx2x_release_leader_lock(struct bnx2x_softc
*sc
)
5950 return bnx2x_release_hw_lock(sc
, bnx2x_get_leader_lock_resource(sc
));
5953 /* close gates #2, #3 and #4 */
5954 static void bnx2x_set_234_gates(struct bnx2x_softc
*sc
, uint8_t close
)
5958 /* gates #2 and #4a are closed/opened */
5960 REG_WR(sc
, PXP_REG_HST_DISCARD_DOORBELLS
, ! !close
);
5962 REG_WR(sc
, PXP_REG_HST_DISCARD_INTERNAL_WRITES
, ! !close
);
5965 if (CHIP_IS_E1x(sc
)) {
5966 /* prevent interrupts from HC on both ports */
5967 val
= REG_RD(sc
, HC_REG_CONFIG_1
);
5969 REG_WR(sc
, HC_REG_CONFIG_1
, (val
& ~(uint32_t)
5970 HC_CONFIG_1_REG_BLOCK_DISABLE_1
));
5972 REG_WR(sc
, HC_REG_CONFIG_1
,
5973 (val
| HC_CONFIG_1_REG_BLOCK_DISABLE_1
));
5975 val
= REG_RD(sc
, HC_REG_CONFIG_0
);
5977 REG_WR(sc
, HC_REG_CONFIG_0
, (val
& ~(uint32_t)
5978 HC_CONFIG_0_REG_BLOCK_DISABLE_0
));
5980 REG_WR(sc
, HC_REG_CONFIG_0
,
5981 (val
| HC_CONFIG_0_REG_BLOCK_DISABLE_0
));
5984 /* Prevent incoming interrupts in IGU */
5985 val
= REG_RD(sc
, IGU_REG_BLOCK_CONFIGURATION
);
5988 REG_WR(sc
, IGU_REG_BLOCK_CONFIGURATION
,
5990 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE
));
5992 REG_WR(sc
, IGU_REG_BLOCK_CONFIGURATION
,
5994 IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE
));
6000 /* poll for pending writes bit, it should get cleared in no more than 1s */
6001 static int bnx2x_er_poll_igu_vq(struct bnx2x_softc
*sc
)
6003 uint32_t cnt
= 1000;
6004 uint32_t pend_bits
= 0;
6007 pend_bits
= REG_RD(sc
, IGU_REG_PENDING_BITS_STATUS
);
6009 if (pend_bits
== 0) {
6014 } while (cnt
-- > 0);
6017 PMD_DRV_LOG(NOTICE
, sc
, "Still pending IGU requests bits=0x%08x!",
6025 #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */
static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t *magic_val)
{
	/* Do some magic... */
	uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* restore the value of the 'magic' bit */
static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val)
{
	/* Restore the 'magic' bit value... */
	uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
	MFCFG_WR(sc, shared_mf_config.clp_mb,
		 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
6044 /* prepare for MCP reset, takes care of CLP configurations */
6045 static void bnx2x_reset_mcp_prep(struct bnx2x_softc
*sc
, uint32_t * magic_val
)
6048 uint32_t validity_offset
;
6050 /* set `magic' bit in order to save MF config */
6051 bnx2x_clp_reset_prep(sc
, magic_val
);
6053 /* get shmem offset */
6054 shmem
= REG_RD(sc
, MISC_REG_SHARED_MEM_ADDR
);
6056 offsetof(struct shmem_region
, validity_map
[SC_PORT(sc
)]);
6058 /* Clear validity map flags */
6060 REG_WR(sc
, shmem
+ validity_offset
, 0);
6064 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6065 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
6067 static void bnx2x_mcp_wait_one(struct bnx2x_softc
*sc
)
6069 /* special handling for emulation and FPGA (10 times longer) */
6070 if (CHIP_REV_IS_SLOW(sc
)) {
6071 DELAY((MCP_ONE_TIMEOUT
* 10) * 1000);
6073 DELAY((MCP_ONE_TIMEOUT
) * 1000);
6077 /* initialize shmem_base and waits for validity signature to appear */
6078 static int bnx2x_init_shmem(struct bnx2x_softc
*sc
)
6084 sc
->devinfo
.shmem_base
=
6085 sc
->link_params
.shmem_base
=
6086 REG_RD(sc
, MISC_REG_SHARED_MEM_ADDR
);
6088 if (sc
->devinfo
.shmem_base
) {
6089 val
= SHMEM_RD(sc
, validity_map
[SC_PORT(sc
)]);
6090 if (val
& SHR_MEM_VALIDITY_MB
)
6094 bnx2x_mcp_wait_one(sc
);
6096 } while (cnt
++ < (MCP_TIMEOUT
/ MCP_ONE_TIMEOUT
));
6098 PMD_DRV_LOG(NOTICE
, sc
, "BAD MCP validity signature");
6103 static int bnx2x_reset_mcp_comp(struct bnx2x_softc
*sc
, uint32_t magic_val
)
6105 int rc
= bnx2x_init_shmem(sc
);
6107 /* Restore the `magic' bit value */
6108 bnx2x_clp_reset_done(sc
, magic_val
);
6113 static void bnx2x_pxp_prep(struct bnx2x_softc
*sc
)
6115 REG_WR(sc
, PXP2_REG_RD_START_INIT
, 0);
6116 REG_WR(sc
, PXP2_REG_RQ_RBC_DONE
, 0);
6121 * Reset the whole chip except for:
6123 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
6125 * - MISC (including AEU)
6129 static void bnx2x_process_kill_chip_reset(struct bnx2x_softc
*sc
, uint8_t global
)
6131 uint32_t not_reset_mask1
, reset_mask1
, not_reset_mask2
, reset_mask2
;
6132 uint32_t global_bits2
, stay_reset2
;
6135 * Bits that have to be set in reset_mask2 if we want to reset 'global'
6136 * (per chip) blocks.
6139 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU
|
6140 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE
;
6143 * Don't reset the following blocks.
6144 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
6145 * reset, as in 4 port device they might still be owned
6146 * by the MCP (there is only one leader per path).
6149 MISC_REGISTERS_RESET_REG_1_RST_HC
|
6150 MISC_REGISTERS_RESET_REG_1_RST_PXPV
|
6151 MISC_REGISTERS_RESET_REG_1_RST_PXP
;
6154 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO
|
6155 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE
|
6156 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE
|
6157 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE
|
6158 MISC_REGISTERS_RESET_REG_2_RST_RBCN
|
6159 MISC_REGISTERS_RESET_REG_2_RST_GRC
|
6160 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE
|
6161 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B
|
6162 MISC_REGISTERS_RESET_REG_2_RST_ATC
|
6163 MISC_REGISTERS_RESET_REG_2_PGLC
|
6164 MISC_REGISTERS_RESET_REG_2_RST_BMAC0
|
6165 MISC_REGISTERS_RESET_REG_2_RST_BMAC1
|
6166 MISC_REGISTERS_RESET_REG_2_RST_EMAC0
|
6167 MISC_REGISTERS_RESET_REG_2_RST_EMAC1
|
6168 MISC_REGISTERS_RESET_REG_2_UMAC0
| MISC_REGISTERS_RESET_REG_2_UMAC1
;
6171 * Keep the following blocks in reset:
6172 * - all xxMACs are handled by the elink code.
6175 MISC_REGISTERS_RESET_REG_2_XMAC
|
6176 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT
;
6178 /* Full reset masks according to the chip */
6179 reset_mask1
= 0xffffffff;
6181 if (CHIP_IS_E1H(sc
))
6182 reset_mask2
= 0x1ffff;
6183 else if (CHIP_IS_E2(sc
))
6184 reset_mask2
= 0xfffff;
6185 else /* CHIP_IS_E3 */
6186 reset_mask2
= 0x3ffffff;
6188 /* Don't reset global blocks unless we need to */
6190 reset_mask2
&= ~global_bits2
;
6193 * In case of attention in the QM, we need to reset PXP
6194 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
6195 * because otherwise QM reset would release 'close the gates' shortly
6196 * before resetting the PXP, then the PSWRQ would send a write
6197 * request to PGLUE. Then when PXP is reset, PGLUE would try to
6198 * read the payload data from PSWWR, but PSWWR would not
6199 * respond. The write queue in PGLUE would stuck, dmae commands
6200 * would not return. Therefore it's important to reset the second
6201 * reset register (containing the
6202 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
6203 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
6206 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
6207 reset_mask2
& (~not_reset_mask2
));
6209 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6210 reset_mask1
& (~not_reset_mask1
));
6215 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
,
6216 reset_mask2
& (~stay_reset2
));
6221 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, reset_mask1
);
6225 static int bnx2x_process_kill(struct bnx2x_softc
*sc
, uint8_t global
)
6229 uint32_t sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
, pgl_exp_rom2
;
6230 uint32_t tags_63_32
= 0;
6232 /* Empty the Tetris buffer, wait for 1s */
6234 sr_cnt
= REG_RD(sc
, PXP2_REG_RD_SR_CNT
);
6235 blk_cnt
= REG_RD(sc
, PXP2_REG_RD_BLK_CNT
);
6236 port_is_idle_0
= REG_RD(sc
, PXP2_REG_RD_PORT_IS_IDLE_0
);
6237 port_is_idle_1
= REG_RD(sc
, PXP2_REG_RD_PORT_IS_IDLE_1
);
6238 pgl_exp_rom2
= REG_RD(sc
, PXP2_REG_PGL_EXP_ROM2
);
6239 if (CHIP_IS_E3(sc
)) {
6240 tags_63_32
= REG_RD(sc
, PGLUE_B_REG_TAGS_63_32
);
6243 if ((sr_cnt
== 0x7e) && (blk_cnt
== 0xa0) &&
6244 ((port_is_idle_0
& 0x1) == 0x1) &&
6245 ((port_is_idle_1
& 0x1) == 0x1) &&
6246 (pgl_exp_rom2
== 0xffffffff) &&
6247 (!CHIP_IS_E3(sc
) || (tags_63_32
== 0xffffffff)))
6250 } while (cnt
-- > 0);
6253 PMD_DRV_LOG(NOTICE
, sc
,
6254 "ERROR: Tetris buffer didn't get empty or there "
6255 "are still outstanding read requests after 1s! "
6256 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
6257 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x",
6258 sr_cnt
, blk_cnt
, port_is_idle_0
, port_is_idle_1
,
6265 /* Close gates #2, #3 and #4 */
6266 bnx2x_set_234_gates(sc
, TRUE
);
6268 /* Poll for IGU VQs for 57712 and newer chips */
6269 if (!CHIP_IS_E1x(sc
) && bnx2x_er_poll_igu_vq(sc
)) {
6273 /* clear "unprepared" bit */
6274 REG_WR(sc
, MISC_REG_UNPREPARED
, 0);
6277 /* Make sure all is written to the chip before the reset */
6281 * Wait for 1ms to empty GLUE and PCI-E core queues,
6282 * PSWHST, GRC and PSWRD Tetris buffer.
6286 /* Prepare to chip reset: */
6289 bnx2x_reset_mcp_prep(sc
, &val
);
6296 /* reset the chip */
6297 bnx2x_process_kill_chip_reset(sc
, global
);
6300 /* Recover after reset: */
6302 if (global
&& bnx2x_reset_mcp_comp(sc
, val
)) {
6306 /* Open the gates #2, #3 and #4 */
6307 bnx2x_set_234_gates(sc
, FALSE
);
6312 static int bnx2x_leader_reset(struct bnx2x_softc
*sc
)
6315 uint8_t global
= bnx2x_reset_is_global(sc
);
6319 * If not going to reset MCP, load "fake" driver to reset HW while
6320 * driver is owner of the HW.
6322 if (!global
&& !BNX2X_NOMCP(sc
)) {
6323 load_code
= bnx2x_fw_command(sc
, DRV_MSG_CODE_LOAD_REQ
,
6324 DRV_MSG_CODE_LOAD_REQ_WITH_LFA
);
6326 PMD_DRV_LOG(NOTICE
, sc
, "MCP response failure, aborting");
6328 goto exit_leader_reset
;
6331 if ((load_code
!= FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) &&
6332 (load_code
!= FW_MSG_CODE_DRV_LOAD_COMMON
)) {
6333 PMD_DRV_LOG(NOTICE
, sc
,
6334 "MCP unexpected response, aborting");
6336 goto exit_leader_reset2
;
6339 load_code
= bnx2x_fw_command(sc
, DRV_MSG_CODE_LOAD_DONE
, 0);
6341 PMD_DRV_LOG(NOTICE
, sc
, "MCP response failure, aborting");
6343 goto exit_leader_reset2
;
6347 /* try to recover after the failure */
6348 if (bnx2x_process_kill(sc
, global
)) {
6349 PMD_DRV_LOG(NOTICE
, sc
, "Something bad occurred on engine %d!",
6352 goto exit_leader_reset2
;
6356 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
6359 bnx2x_set_reset_done(sc
);
6361 bnx2x_clear_reset_global(sc
);
6366 /* unload "fake driver" if it was loaded */
6367 if (!global
&&!BNX2X_NOMCP(sc
)) {
6368 bnx2x_fw_command(sc
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
6369 bnx2x_fw_command(sc
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
6375 bnx2x_release_leader_lock(sc
);
6382 * prepare INIT transition, parameters configured:
6383 * - HC configuration
6384 * - Queue's CDU context
6387 bnx2x_pf_q_prep_init(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
6388 struct ecore_queue_init_params
*init_params
)
6391 int cxt_index
, cxt_offset
;
6393 bnx2x_set_bit(ECORE_Q_FLG_HC
, &init_params
->rx
.flags
);
6394 bnx2x_set_bit(ECORE_Q_FLG_HC
, &init_params
->tx
.flags
);
6396 bnx2x_set_bit(ECORE_Q_FLG_HC_EN
, &init_params
->rx
.flags
);
6397 bnx2x_set_bit(ECORE_Q_FLG_HC_EN
, &init_params
->tx
.flags
);
6400 init_params
->rx
.hc_rate
=
6401 sc
->hc_rx_ticks
? (1000000 / sc
->hc_rx_ticks
) : 0;
6402 init_params
->tx
.hc_rate
=
6403 sc
->hc_tx_ticks
? (1000000 / sc
->hc_tx_ticks
) : 0;
6406 init_params
->rx
.fw_sb_id
= init_params
->tx
.fw_sb_id
= fp
->fw_sb_id
;
6408 /* CQ index among the SB indices */
6409 init_params
->rx
.sb_cq_index
= HC_INDEX_ETH_RX_CQ_CONS
;
6410 init_params
->tx
.sb_cq_index
= HC_INDEX_ETH_FIRST_TX_CQ_CONS
;
6412 /* set maximum number of COSs supported by this queue */
6413 init_params
->max_cos
= sc
->max_cos
;
6415 /* set the context pointers queue object */
6416 for (cos
= FIRST_TX_COS_INDEX
; cos
< init_params
->max_cos
; cos
++) {
6417 cxt_index
= fp
->index
/ ILT_PAGE_CIDS
;
6418 cxt_offset
= fp
->index
- (cxt_index
* ILT_PAGE_CIDS
);
6419 init_params
->cxts
[cos
] =
6420 &sc
->context
[cxt_index
].vcxt
[cxt_offset
].eth
;
6424 /* set flags that are common for the Tx-only and not normal connections */
6425 static unsigned long
6426 bnx2x_get_common_flags(struct bnx2x_softc
*sc
, uint8_t zero_stats
)
6428 unsigned long flags
= 0;
6430 /* PF driver will always initialize the Queue to an ACTIVE state */
6431 bnx2x_set_bit(ECORE_Q_FLG_ACTIVE
, &flags
);
6434 * tx only connections collect statistics (on the same index as the
6435 * parent connection). The statistics are zeroed when the parent
6436 * connection is initialized.
6439 bnx2x_set_bit(ECORE_Q_FLG_STATS
, &flags
);
6441 bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS
, &flags
);
6445 * tx only connections can support tx-switching, though their
6446 * CoS-ness doesn't survive the loopback
6448 if (sc
->flags
& BNX2X_TX_SWITCHING
) {
6449 bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH
, &flags
);
6452 bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT
, &flags
);
6457 static unsigned long bnx2x_get_q_flags(struct bnx2x_softc
*sc
, uint8_t leading
)
6459 unsigned long flags
= 0;
6462 bnx2x_set_bit(ECORE_Q_FLG_OV
, &flags
);
6466 bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS
, &flags
);
6467 bnx2x_set_bit(ECORE_Q_FLG_MCAST
, &flags
);
6470 bnx2x_set_bit(ECORE_Q_FLG_VLAN
, &flags
);
6472 /* merge with common flags */
6473 return flags
| bnx2x_get_common_flags(sc
, TRUE
);
6477 bnx2x_pf_q_prep_general(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
6478 struct ecore_general_setup_params
*gen_init
, uint8_t cos
)
6480 gen_init
->stat_id
= bnx2x_stats_id(fp
);
6481 gen_init
->spcl_id
= fp
->cl_id
;
6482 gen_init
->mtu
= sc
->mtu
;
6483 gen_init
->cos
= cos
;
6487 bnx2x_pf_rx_q_prep(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
6488 struct rxq_pause_params
*pause
,
6489 struct ecore_rxq_setup_params
*rxq_init
)
6491 struct bnx2x_rx_queue
*rxq
;
6493 rxq
= sc
->rx_queues
[fp
->index
];
6495 PMD_RX_LOG(ERR
, "RX queue is NULL");
6499 pause
->bd_th_lo
= BD_TH_LO(sc
);
6500 pause
->bd_th_hi
= BD_TH_HI(sc
);
6502 pause
->rcq_th_lo
= RCQ_TH_LO(sc
);
6503 pause
->rcq_th_hi
= RCQ_TH_HI(sc
);
6505 /* validate rings have enough entries to cross high thresholds */
6506 if (sc
->dropless_fc
&&
6507 pause
->bd_th_hi
+ FW_PREFETCH_CNT
> sc
->rx_ring_size
) {
6508 PMD_DRV_LOG(WARNING
, sc
, "rx bd ring threshold limit");
6511 if (sc
->dropless_fc
&&
6512 pause
->rcq_th_hi
+ FW_PREFETCH_CNT
> USABLE_RCQ_ENTRIES(rxq
)) {
6513 PMD_DRV_LOG(WARNING
, sc
, "rcq ring threshold limit");
6519 rxq_init
->dscr_map
= (rte_iova_t
)rxq
->rx_ring_phys_addr
;
6520 rxq_init
->rcq_map
= (rte_iova_t
)rxq
->cq_ring_phys_addr
;
6521 rxq_init
->rcq_np_map
= (rte_iova_t
)(rxq
->cq_ring_phys_addr
+
6525 * This should be a maximum number of data bytes that may be
6526 * placed on the BD (not including paddings).
6528 rxq_init
->buf_sz
= (fp
->rx_buf_size
- IP_HEADER_ALIGNMENT_PADDING
);
6530 rxq_init
->cl_qzone_id
= fp
->cl_qzone_id
;
6531 rxq_init
->rss_engine_id
= SC_FUNC(sc
);
6532 rxq_init
->mcast_engine_id
= SC_FUNC(sc
);
6534 rxq_init
->cache_line_log
= BNX2X_RX_ALIGN_SHIFT
;
6535 rxq_init
->fw_sb_id
= fp
->fw_sb_id
;
6537 rxq_init
->sb_cq_index
= HC_INDEX_ETH_RX_CQ_CONS
;
6540 * configure silent vlan removal
6541 * if multi function mode is afex, then mask default vlan
6543 if (IS_MF_AFEX(sc
)) {
6544 rxq_init
->silent_removal_value
=
6545 sc
->devinfo
.mf_info
.afex_def_vlan_tag
;
6546 rxq_init
->silent_removal_mask
= EVL_VLID_MASK
;
6551 bnx2x_pf_tx_q_prep(struct bnx2x_softc
*sc
, struct bnx2x_fastpath
*fp
,
6552 struct ecore_txq_setup_params
*txq_init
, uint8_t cos
)
6554 struct bnx2x_tx_queue
*txq
= fp
->sc
->tx_queues
[fp
->index
];
6557 PMD_TX_LOG(ERR
, "ERROR: TX queue is NULL");
6560 txq_init
->dscr_map
= (rte_iova_t
)txq
->tx_ring_phys_addr
;
6561 txq_init
->sb_cq_index
= HC_INDEX_ETH_FIRST_TX_CQ_CONS
+ cos
;
6562 txq_init
->traffic_type
= LLFC_TRAFFIC_TYPE_NW
;
6563 txq_init
->fw_sb_id
= fp
->fw_sb_id
;
6566 * set the TSS leading client id for TX classfication to the
6567 * leading RSS client id
6569 txq_init
->tss_leading_cl_id
= BNX2X_FP(sc
, 0, cl_id
);
/*
 * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
 */
static int
bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading)
{
	struct ecore_queue_state_params q_params = { NULL };
	struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
	int rc;

	PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index);

	bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

	q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;

	/* we want to wait for completion in this context */
	bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* prepare the INIT parameters */
	bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = ECORE_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = ecore_queue_state_change(sc, &q_params);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index);
		return rc;
	}

	PMD_DRV_LOG(DEBUG, sc, "init complete");

	/* now move the Queue to the SETUP state */
	memset(setup_params, 0, sizeof(*setup_params));

	/* set Queue flags */
	setup_params->flags = bnx2x_get_q_flags(sc, leading);

	/* set general SETUP parameters */
	bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(sc, fp,
			   &setup_params->pause_params,
			   &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = ECORE_Q_CMD_SETUP;

	/* change the state to SETUP */
	rc = ecore_queue_state_change(sc, &q_params);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index);
	}

	return rc;
}
static int bnx2x_setup_leading(struct bnx2x_softc *sc)
{
	if (IS_PF(sc))
		return bnx2x_setup_queue(sc, &sc->fp[0], TRUE);
	else		/* IS_VF(sc) */
		return bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE);
}
static int
bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj,
		    uint8_t config_hash)
{
	struct ecore_config_rss_params params = { NULL };
	uint32_t i;

	/*
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	params.rss_obj = rss_obj;

	bnx2x_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);

	/* RSS configuration */
	bnx2x_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
	bnx2x_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
	bnx2x_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
	bnx2x_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
	if (rss_obj->udp_rss_v4) {
		bnx2x_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
	}
	if (rss_obj->udp_rss_v6) {
		bnx2x_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
	}

	params.rss_result_mask = MULTI_MASK;

	rte_memcpy(params.ind_table, rss_obj->ind_table,
		   sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
			params.rss_key[i] = (uint32_t) rte_rand();
		}

		bnx2x_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(sc))
		return ecore_config_rss(sc, &params);
	else
		return bnx2x_vf_config_rss(sc, &params);
}

static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash)
{
	return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash);
}

static int bnx2x_init_rss_pf(struct bnx2x_softc *sc)
{
	uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc);
	uint32_t i;

	/*
	 * Prepare the initial contents of the indirection table if
	 * RSS is enabled
	 */
	for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
		sc->rss_conf_obj.ind_table[i] =
		    (sc->fp->cl_id + (i % num_eth_queues));
	}

	sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;

	/*
	 * For 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer it's a per-function configuration.
	 */
	return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc));
}
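/*
 * Illustrative example with hypothetical values (not part of the original
 * driver comments): with four ETH queues and fp[0].cl_id == 10, the loop in
 * bnx2x_init_rss_pf() fills the indirection table with the repeating pattern
 * 10, 11, 12, 13, 10, 11, ... so the hardware RX hash spreads flows evenly
 * across the client IDs of the configured queues.
 */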
static int
bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t *mac,
		  struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type,
		  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_ramrod_params ramrod_param;
	int rc;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* fill in general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* fill a user request section if needed */
	if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
		rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
			   ETH_ALEN);

		bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
		    ECORE_VLAN_MAC_DEL;
	}

	rc = ecore_config_vlan_mac(sc, &ramrod_param);

	if (rc == ECORE_EXISTS) {
		PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)");
		/* do not treat adding same MAC as error */
		rc = 0;
	} else if (rc < 0) {
		PMD_DRV_LOG(ERR, sc,
			    "%s MAC failed (%d)", (set ? "Set" : "Delete"), rc);
	}

	return rc;
}

static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set)
{
	unsigned long ramrod_flags = 0;

	PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC");

	bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Eth MAC is set on RSS leading client (fp[0]) */
	return bnx2x_set_mac_one(sc, sc->link_params.mac_addr,
				 &sc->sp_objs->mac_obj,
				 set, ECORE_ETH_MAC, &ramrod_flags);
}
static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc)
{
	uint32_t sel_phy_idx = 0;

	if (sc->link_params.num_phys <= 1) {
		return ELINK_INT_PHY;
	}

	if (sc->link_vars.link_up) {
		sel_phy_idx = ELINK_EXT_PHY1;
		/* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
		if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (sc->link_params.phy[ELINK_EXT_PHY2].supported &
		     ELINK_SUPPORTED_FIBRE))
			sel_phy_idx = ELINK_EXT_PHY2;
	} else {
		switch (elink_phy_selection(&sc->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = ELINK_EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = ELINK_EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc)
{
	uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration.
	 */
	if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == ELINK_EXT_PHY1)
			sel_phy_idx = ELINK_EXT_PHY2;
		else if (sel_phy_idx == ELINK_EXT_PHY2)
			sel_phy_idx = ELINK_EXT_PHY1;
	}

	return ELINK_LINK_CONFIG_IDX(sel_phy_idx);
}

static void bnx2x_set_requested_fc(struct bnx2x_softc *sc)
{
	/*
	 * Initialize link parameters structure variables.
	 * It is recommended to turn off RX FC for jumbo frames
	 * for better performance.
	 */
	if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
		sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
	} else {
		sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
	}
}

static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc)
{
	uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc);

	switch (sc->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;
	}
}

static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc)
{
	uint16_t line_speed = sc->link_vars.line_speed;

	if (IS_MF(sc)) {
		uint16_t maxCfg =
		    bnx2x_extract_max_cfg(sc,
					  sc->devinfo.mf_info.mf_config[SC_VN(sc)]);

		/* calculate the current MAX line speed limit for the MF devices */
		if (IS_MF_SI(sc)) {
			line_speed = (line_speed * maxCfg) / 100;
		} else {	/* SD mode */
			uint16_t vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed) {
				line_speed = vn_max_rate;
			}
		}
	}

	return line_speed;
}
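/*
 * Illustrative example with hypothetical values (not part of the original
 * driver comments): in SI mode maxCfg acts as a percentage, so maxCfg = 50 on
 * a 10000 Mbps link reports 10000 * 50 / 100 = 5000 Mbps; in SD mode maxCfg is
 * taken in 100 Mbps units, so maxCfg = 25 caps the reported speed at 2500 Mbps
 * whenever the physical line speed is higher.
 */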
static void
bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data)
{
	uint16_t line_speed = bnx2x_get_mf_speed(sc);

	memset(data, 0, sizeof(*data));

	/* fill the report data with the effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) {
		bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			      &data->link_report_flags);
	}

	/* Full DUPLEX */
	if (sc->link_vars.duplex == DUPLEX_FULL) {
		bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
			      &data->link_report_flags);
	}

	/* Rx Flow Control is ON */
	if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
		bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
	}

	/* Tx Flow Control is ON */
	if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
		bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
	}
}
/* report link status to OS, should be called under phy_lock */
static void bnx2x_link_report_locked(struct bnx2x_softc *sc)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(sc)) {
		bnx2x_read_mf_cfg(sc);
	}

	/* Read the current link report info */
	bnx2x_fill_report_data(sc, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
	    (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			    &sc->last_reported_link.link_report_flags) &&
	     bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			    &cur_data.link_report_flags))) {
		return;
	}

	ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %lx, last_reported_link = %lx",
		       cur_data.link_report_flags,
		       sc->last_reported_link.link_report_flags);

	sc->link_cnt++;

	ELINK_DEBUG_P1(sc, "link status change count = %x", sc->link_cnt);
	/* report new link params and remember the state for the next time */
	rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));

	if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			   &cur_data.link_report_flags)) {
		ELINK_DEBUG_P0(sc, "NIC Link is Down");
	} else {
		__rte_unused const char *duplex;
		__rte_unused const char *flow;

		if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX,
					     &cur_data.link_report_flags)) {
			duplex = "full";
			ELINK_DEBUG_P0(sc, "link set to full duplex");
		} else {
			duplex = "half";
			ELINK_DEBUG_P0(sc, "link set to half duplex");
		}

		/*
		 * Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
					   &cur_data.link_report_flags) &&
			    bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					   &cur_data.link_report_flags)) {
				flow = "ON - receive & transmit";
			} else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
						  &cur_data.link_report_flags) &&
				   !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
						   &cur_data.link_report_flags)) {
				flow = "ON - receive";
			} else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
						   &cur_data.link_report_flags) &&
				   bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
						  &cur_data.link_report_flags)) {
				flow = "ON - transmit";
			} else {
				flow = "none";	/* possible? */
			}
		} else {
			flow = "none";
		}

		PMD_DRV_LOG(INFO, sc,
			    "NIC Link is Up, %d Mbps %s duplex, Flow control: %s",
			    cur_data.line_speed, duplex, flow);
	}
}

static void
bnx2x_link_report(struct bnx2x_softc *sc)
{
	bnx2x_acquire_phy_lock(sc);
	bnx2x_link_report_locked(sc);
	bnx2x_release_phy_lock(sc);
}
void bnx2x_link_status_update(struct bnx2x_softc *sc)
{
	if (sc->state != BNX2X_STATE_OPEN) {
		return;
	}

	if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
		elink_link_status_update(&sc->link_params, &sc->link_vars);
	} else {
		sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
					  ELINK_SUPPORTED_10baseT_Full |
					  ELINK_SUPPORTED_100baseT_Half |
					  ELINK_SUPPORTED_100baseT_Full |
					  ELINK_SUPPORTED_1000baseT_Full |
					  ELINK_SUPPORTED_2500baseX_Full |
					  ELINK_SUPPORTED_10000baseT_Full |
					  ELINK_SUPPORTED_TP |
					  ELINK_SUPPORTED_FIBRE |
					  ELINK_SUPPORTED_Autoneg |
					  ELINK_SUPPORTED_Pause |
					  ELINK_SUPPORTED_Asym_Pause);
		sc->port.advertising[0] = sc->port.supported[0];

		sc->link_params.sc = sc;
		sc->link_params.port = SC_PORT(sc);
		sc->link_params.req_duplex[0] = DUPLEX_FULL;
		sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
		sc->link_params.req_line_speed[0] = SPEED_10000;
		sc->link_params.speed_cap_mask[0] = 0x7f0000;
		sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;

		if (CHIP_REV_IS_FPGA(sc)) {
			sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
			sc->link_vars.line_speed = ELINK_SPEED_1000;
			sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
						     LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
		} else {
			sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
			sc->link_vars.line_speed = ELINK_SPEED_10000;
			sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
						     LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
		}

		sc->link_vars.link_up = 1;

		sc->link_vars.duplex = DUPLEX_FULL;
		sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;

		if (IS_PF(sc)) {
			REG_WR(sc,
			       NIG_REG_EGRESS_DRAIN0_MODE +
			       sc->link_params.port * 4, 0);
			bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
			bnx2x_link_report(sc);
		}
	}

	if (IS_PF(sc)) {
		if (sc->link_vars.link_up) {
			bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
		} else {
			bnx2x_stats_handle(sc, STATS_EVENT_STOP);
		}
		bnx2x_link_report(sc);
	} else {
		bnx2x_link_report_locked(sc);
		bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
	}
}
static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
{
	int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
	uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
	struct elink_params *lp = &sc->link_params;

	bnx2x_set_requested_fc(sc);

	bnx2x_acquire_phy_lock(sc);

	if (load_mode == LOAD_DIAG) {
		lp->loopback_mode = ELINK_LOOPBACK_XGXS;
		/* Prefer doing PHY loopback at 10G speed, if possible */
		if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
			if (lp->speed_cap_mask[cfg_idx] &
			    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
				lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
			} else {
				lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
			}
		}
	}

	if (load_mode == LOAD_LOOPBACK_EXT) {
		lp->loopback_mode = ELINK_LOOPBACK_EXT;
	}

	rc = elink_phy_init(&sc->link_params, &sc->link_vars);

	bnx2x_release_phy_lock(sc);

	bnx2x_calc_fc_adv(sc);

	if (sc->link_vars.link_up) {
		bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
		bnx2x_link_report(sc);
	}

	sc->link_params.req_line_speed[cfg_idx] = req_line_speed;

	return rc;
}
/* update flags in shmem */
void
bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set)
{
	uint32_t drv_flags;

	if (SHMEM2_HAS(sc, drv_flags)) {
		bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(sc, drv_flags);

		if (set) {
			drv_flags |= flags;
		} else {
			drv_flags &= ~flags;
		}

		SHMEM2_WR(sc, drv_flags, drv_flags);

		bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}
/* periodic timer callout routine, only runs when the interface is up */
void bnx2x_periodic_callout(struct bnx2x_softc *sc)
{
	if ((sc->state != BNX2X_STATE_OPEN) ||
	    (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
		PMD_DRV_LOG(DEBUG, sc, "periodic callout exit (state=0x%x)",
			    sc->state);
		return;
	}

	if (!CHIP_REV_IS_SLOW(sc)) {
		/*
		 * This barrier is needed to ensure the ordering between the writing
		 * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
		 * the reading here.
		 */
		mb();

		if (sc->port.pmf) {
			bnx2x_acquire_phy_lock(sc);
			elink_period_func(&sc->link_params, &sc->link_vars);
			bnx2x_release_phy_lock(sc);
		}
	}

	if (IS_PF(sc) && !BNX2X_NOMCP(sc)) {
		int mb_idx = SC_FW_MB_IDX(sc);
		uint32_t drv_pulse;
		uint32_t mcp_pulse;

		++sc->fw_drv_pulse_wr_seq;
		sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;

		drv_pulse = sc->fw_drv_pulse_wr_seq;
		bnx2x_drv_pulse(sc);

		mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);

		/*
		 * The delta between driver pulse and mcp response should
		 * be 1 (before mcp response) or 0 (after mcp response).
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			PMD_DRV_LOG(ERR, sc,
				    "drv_pulse (0x%x) != mcp_pulse (0x%x)",
				    drv_pulse, mcp_pulse);
		}
	}
}
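/*
 * Worked example of the heartbeat check above (illustrative values, not from
 * the original sources): if the driver last wrote pulse 0x5 and the MCP has
 * already echoed 0x5, the delta is 0; if the MCP has only echoed 0x4 so far,
 * the delta is 1. Both are healthy. Any other delta, e.g. drv_pulse 0x7 versus
 * mcp_pulse 0x3, means a missed heartbeat and triggers the error log.
 */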
/* start the controller */
static __rte_noinline
int bnx2x_nic_load(struct bnx2x_softc *sc)
{
	uint32_t val;
	uint32_t load_code = 0;
	int i, rc = 0;

	PMD_INIT_FUNC_TRACE(sc);

	sc->state = BNX2X_STATE_OPENING_WAITING_LOAD;

	if (IS_PF(sc)) {
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(sc);
	}

	bnx2x_set_fp_rx_buf_size(sc);

	if (IS_PF(sc)) {
		if (bnx2x_alloc_mem(sc) != 0) {
			sc->state = BNX2X_STATE_CLOSED;
			rc = -ENOMEM;
			goto bnx2x_nic_load_error0;
		}
	}

	if (bnx2x_alloc_fw_stats_mem(sc) != 0) {
		sc->state = BNX2X_STATE_CLOSED;
		rc = -ENOMEM;
		goto bnx2x_nic_load_error0;
	}

	if (IS_VF(sc)) {
		rc = bnx2x_vf_init(sc);
		if (rc) {
			sc->state = BNX2X_STATE_ERROR;
			goto bnx2x_nic_load_error0;
		}
	}

	if (IS_PF(sc)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(sc);

		/* if MCP exists send load request and analyze response */
		if (!BNX2X_NOMCP(sc)) {
			/* attempt to load pf */
			if (bnx2x_nic_load_request(sc, &load_code) != 0) {
				sc->state = BNX2X_STATE_CLOSED;
				rc = -ENXIO;
				goto bnx2x_nic_load_error1;
			}

			/* what did the MCP say? */
			if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) {
				bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
				sc->state = BNX2X_STATE_CLOSED;
				rc = -ENXIO;
				goto bnx2x_nic_load_error2;
			}
		} else {
			PMD_DRV_LOG(INFO, sc, "Device has no MCP!");
			load_code = bnx2x_nic_load_no_mcp(sc);
		}

		/* mark PMF if applicable */
		bnx2x_nic_load_pmf(sc, load_code);

		/* Init Function state controlling object */
		bnx2x_init_func_obj(sc);

		/* Initialize HW */
		if (bnx2x_init_hw(sc, load_code) != 0) {
			PMD_DRV_LOG(NOTICE, sc, "HW init failed");
			bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
			sc->state = BNX2X_STATE_CLOSED;
			rc = -ENXIO;
			goto bnx2x_nic_load_error2;
		}
	}

	bnx2x_nic_init(sc, load_code);

	/* Init per-function objects */
	if (IS_PF(sc)) {
		bnx2x_init_objs(sc);

		/* set AFEX default VLAN tag to an invalid value */
		sc->devinfo.mf_info.afex_def_vlan_tag = -1;

		sc->state = BNX2X_STATE_OPENING_WAITING_PORT;
		rc = bnx2x_func_start(sc);
		if (rc) {
			PMD_DRV_LOG(NOTICE, sc, "Function start failed!");
			bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
			sc->state = BNX2X_STATE_ERROR;
			goto bnx2x_nic_load_error3;
		}

		/* send LOAD_DONE command to MCP */
		if (!BNX2X_NOMCP(sc)) {
			load_code =
			    bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				PMD_DRV_LOG(NOTICE, sc,
					    "MCP response failure, aborting");
				sc->state = BNX2X_STATE_ERROR;
				rc = -ENXIO;
				goto bnx2x_nic_load_error3;
			}
		}
	}

	rc = bnx2x_setup_leading(sc);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!");
		sc->state = BNX2X_STATE_ERROR;
		goto bnx2x_nic_load_error3;
	}

	FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
		if (IS_PF(sc))
			rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE);
		else		/* IS_VF(sc) */
			rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE);

		if (rc) {
			PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i);
			sc->state = BNX2X_STATE_ERROR;
			goto bnx2x_nic_load_error3;
		}
	}

	rc = bnx2x_init_rss_pf(sc);
	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed");
		sc->state = BNX2X_STATE_ERROR;
		goto bnx2x_nic_load_error3;
	}

	/* now when Clients are configured we are ready to work */
	sc->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(sc)) {
		rc = bnx2x_set_eth_mac(sc, TRUE);
	} else {		/* IS_VF(sc) */
		rc = bnx2x_vf_set_mac(sc, TRUE);
	}

	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed");
		sc->state = BNX2X_STATE_ERROR;
		goto bnx2x_nic_load_error3;
	}

	if (sc->port.pmf) {
		rc = bnx2x_initial_phy_init(sc, LOAD_OPEN);
		if (rc) {
			sc->state = BNX2X_STATE_ERROR;
			goto bnx2x_nic_load_error3;
		}
	}

	sc->link_params.feature_config_flags &=
	    ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;

	/* start the Tx */
	switch (LOAD_OPEN) {
	case LOAD_NORMAL:
	case LOAD_OPEN:
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		sc->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (sc->port.pmf) {
		bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
	} else {
		bnx2x_link_status_update(sc);
	}

	if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
		SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
			  (val |
			   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			   DRV_FLAGS_CAPABILITIES_LOADED_L2));
	}

	/* start fast path */
	/* Initialize Rx filter */
	bnx2x_set_rx_mode(sc);

	/* wait for all pending SP commands to complete */
	if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) {
		PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!");
		bnx2x_periodic_stop(sc);
		bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE);
		return -ENXIO;
	}

	PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded");

	return 0;

bnx2x_nic_load_error3:

	if (IS_PF(sc)) {
		bnx2x_int_disable_sync(sc, 1);

		/* clean out queued objects */
		bnx2x_squeeze_objects(sc);
	}

bnx2x_nic_load_error2:

	if (IS_PF(sc) && !BNX2X_NOMCP(sc)) {
		bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

bnx2x_nic_load_error1:

	/* clear pf_load status, as it was already set */
	if (IS_PF(sc)) {
		bnx2x_clear_pf_load(sc);
	}

bnx2x_nic_load_error0:

	bnx2x_free_fw_stats_mem(sc);
	bnx2x_free_mem(sc);

	return rc;
}
/*
 * Handles controller initialization.
 */
int bnx2x_init(struct bnx2x_softc *sc)
{
	int other_engine = SC_PATH(sc) ? 0 : 1;
	uint8_t other_load_status, load_status;
	uint8_t global = FALSE;
	int rc;

	/* Check if the driver is still running and bail out if it is. */
	if (sc->state != BNX2X_STATE_CLOSED) {
		PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!");
		rc = 0;
		goto bnx2x_init_done;
	}

	bnx2x_set_power_state(sc, PCI_PM_D0);

	/*
	 * If parity occurred during the unload, then attentions and/or
	 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function
	 * loaded on the current engine to complete the recovery. Parity recovery
	 * is only relevant for PF driver.
	 */
	if (IS_PF(sc)) {
		other_load_status = bnx2x_get_load_status(sc, other_engine);
		load_status = bnx2x_get_load_status(sc, SC_PATH(sc));

		if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) ||
		    bnx2x_chk_parity_attn(sc, &global, TRUE)) {
			do {
				/*
				 * If there are attentions and they are in global blocks, set
				 * the GLOBAL_RESET bit regardless of whether it will be this
				 * function that will complete the recovery or not.
				 */
				if (global) {
					bnx2x_set_reset_global(sc);
				}

				/*
				 * Only the first function on the current engine should try
				 * to recover in open. In case of attentions in global blocks
				 * only the first in the chip should try to recover.
				 */
				if ((!load_status
				     && (!global || !other_load_status))
				    && bnx2x_trylock_leader_lock(sc)
				    && !bnx2x_leader_reset(sc)) {
					PMD_DRV_LOG(INFO, sc,
						    "Recovered during init");
					break;
				}

				/* recovery has failed... */
				bnx2x_set_power_state(sc, PCI_PM_D3hot);

				sc->recovery_state = BNX2X_RECOVERY_FAILED;

				PMD_DRV_LOG(NOTICE, sc,
					    "Recovery flow hasn't properly "
					    "completed yet, try again later. "
					    "If you still see this message after a "
					    "few retries then power cycle is required.");

				rc = -ENXIO;
				goto bnx2x_init_done;
			} while (0);
		}
	}

	sc->recovery_state = BNX2X_RECOVERY_DONE;

	rc = bnx2x_nic_load(sc);

bnx2x_init_done:

	if (rc) {
		PMD_DRV_LOG(NOTICE, sc, "Initialization failed, "
			    "stack notified driver is NOT running!");
	}

	return rc;
}
static void bnx2x_get_function_num(struct bnx2x_softc *sc)
{
	uint32_t val;

	/*
	 * Read the ME register to get the function number. The ME register
	 * holds the relative-function number and absolute-function number. The
	 * absolute-function number appears only in E2 and above. Before that
	 * these bits always contained zero, therefore we cannot blindly use them.
	 */
	val = REG_RD(sc, BAR_ME_REGISTER);

	sc->pfunc_rel =
	    (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
	sc->path_id =
	    (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) &
	    1;

	if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
		sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
	} else {
		sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
	}

	PMD_DRV_LOG(DEBUG, sc,
		    "Relative function %d, Absolute function %d, Path %d",
		    sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}

static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc)
{
	uint32_t shmem2_size;
	uint32_t offset;
	uint32_t mf_cfg_offset_value;

	offset = (SHMEM_ADDR(sc, func_mb) +
		  (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

	if (sc->devinfo.shmem2_base != 0) {
		shmem2_size = SHMEM2_RD(sc, size);
		if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
			mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
			if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
				offset = mf_cfg_offset_value;
			}
		}
	}

	return offset;
}
static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
{
	uint16_t ret = 0;
	struct bnx2x_pci_cap *caps;

	/* ensure PCIe capability is enabled */
	caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP);
	if (NULL != caps) {
		PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: "
			    "id=0x%04X type=0x%04X addr=0x%08X",
			    caps->id, caps->type, caps->addr);
		pci_read(sc, (caps->addr + reg), &ret, 2);
		return ret;
	}

	PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!");

	return 0;
}

static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
{
	return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
	    PCIM_EXP_STA_TRANSACTION_PND;
}
/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
 */
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
{
	PMD_INIT_FUNC_TRACE(sc);

	struct bnx2x_pci_cap *caps;
	uint16_t link_status;
	int reg = 0;

	/* check if PCI Power Management is enabled */
	caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
	if (NULL != caps) {
		PMD_DRV_LOG(DEBUG, sc, "Found PM capability: "
			    "id=0x%04X type=0x%04X addr=0x%08X",
			    caps->id, caps->type, caps->addr);

		sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG;
		sc->devinfo.pcie_pm_cap_reg = caps->addr;
	}

	link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA);

	sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED);
	sc->devinfo.pcie_link_width =
	    ((link_status & PCIM_LINK_STA_WIDTH) >> 4);

	PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d",
		    sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);

	sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG;

	/* check if MSI capability is enabled */
	caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP);
	if (NULL != caps) {
		PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg);

		sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG;
		sc->devinfo.pcie_msi_cap_reg = caps->addr;
	}

	/* check if MSI-X capability is enabled */
	caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP);
	if (NULL != caps) {
		PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg);

		sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG;
		sc->devinfo.pcie_msix_cap_reg = caps->addr;
	}
}
static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc)
{
	struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
	uint32_t val;

	/* get the outer vlan if we're in switch-dependent mode */

	val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
	mf_info->ext_id = (uint16_t) val;

	mf_info->multi_vnics_mode = 1;

	if (!VALID_OVLAN(mf_info->ext_id)) {
		PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id);
		return 1;
	}

	/* get the capabilities */
	if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
	    FUNC_MF_CFG_PROTOCOL_ISCSI) {
		mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
	} else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK)
		   == FUNC_MF_CFG_PROTOCOL_FCOE) {
		mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
	} else {
		mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
	}

	mf_info->vnics_per_port =
	    (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

	return 0;
}

static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc)
{
	uint32_t retval = 0;
	uint32_t val;

	val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

	if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
		if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
			retval |= MF_PROTO_SUPPORT_ETHERNET;
		}
		if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
			retval |= MF_PROTO_SUPPORT_ISCSI;
		}
		if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
			retval |= MF_PROTO_SUPPORT_FCOE;
		}
	}

	return retval;
}

static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc)
{
	struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
	uint32_t val;

	/*
	 * There is no outer vlan if we're in switch-independent mode.
	 * If the mac is valid then assume multi-function.
	 */

	val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

	mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);

	mf_info->mf_protos_supported =
	    bnx2x_get_shmem_ext_proto_support_flags(sc);

	mf_info->vnics_per_port =
	    (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

	return 0;
}
static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc)
{
	struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
	uint32_t e1hov_tag;
	uint32_t func_config;
	uint32_t niv_config;

	mf_info->multi_vnics_mode = 1;

	e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
	func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
	niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);

	mf_info->ext_id =
	    (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
			FUNC_MF_CFG_E1HOV_TAG_SHIFT);

	mf_info->default_vlan =
	    (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
			FUNC_MF_CFG_AFEX_VLAN_SHIFT);

	mf_info->niv_allowed_priorities =
	    (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
		       FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);

	mf_info->niv_default_cos =
	    (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
		       FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);

	mf_info->afex_vlan_mode =
	    ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
	     FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);

	mf_info->niv_mba_enabled =
	    ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
	     FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);

	mf_info->mf_protos_supported =
	    bnx2x_get_shmem_ext_proto_support_flags(sc);

	mf_info->vnics_per_port =
	    (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

	return 0;
}
static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
{
	struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
	uint32_t mf_cfg1;
	uint32_t mf_cfg2;
	uint32_t ovlan1;
	uint32_t ovlan2;
	uint8_t i, j;

	/* various MF mode sanity checks... */

	if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
		PMD_DRV_LOG(NOTICE, sc,
			    "Enumerated function %d is marked as hidden",
			    SC_VN(sc));
		return 1;
	}

	if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
		PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d",
			    mf_info->vnics_per_port, mf_info->multi_vnics_mode);
		return 1;
	}

	if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
		/* vnic id > 0 must have valid ovlan in switch-dependent mode */
		if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
			PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d",
				    SC_VN(sc), OVLAN(sc));
			return 1;
		}

		if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
			PMD_DRV_LOG(NOTICE, sc,
				    "mf_mode=SD multi_vnics_mode=%d ovlan=%d",
				    mf_info->multi_vnics_mode, OVLAN(sc));
			return 1;
		}

		/*
		 * Verify all functions are either MF or SF mode. If MF, make
		 * sure that all non-hidden functions have a valid ovlan. If SF,
		 * make sure that all non-hidden functions have an invalid ovlan.
		 */
		FOREACH_ABS_FUNC_IN_PORT(sc, i) {
			mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
			ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
			if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
			    (((mf_info->multi_vnics_mode)
			      && !VALID_OVLAN(ovlan1))
			     || ((!mf_info->multi_vnics_mode)
				 && VALID_OVLAN(ovlan1)))) {
				PMD_DRV_LOG(NOTICE, sc,
					    "mf_mode=SD function %d MF config "
					    "mismatch, multi_vnics_mode=%d ovlan=%d",
					    i, mf_info->multi_vnics_mode,
					    ovlan1);
				return 1;
			}
		}

		/* Verify all funcs on the same port each have a different ovlan. */
		FOREACH_ABS_FUNC_IN_PORT(sc, i) {
			mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
			ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
			/* iterate from the next function on the port to the max func */
			for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
				mf_cfg2 =
				    MFCFG_RD(sc, func_mf_config[j].config);
				ovlan2 =
				    MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
				if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE)
				    && VALID_OVLAN(ovlan1)
				    && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE)
				    && VALID_OVLAN(ovlan2)
				    && (ovlan1 == ovlan2)) {
					PMD_DRV_LOG(NOTICE, sc,
						    "mf_mode=SD functions %d and %d "
						    "have the same ovlan (%d)",
						    i, j, ovlan1);
					return 1;
				}
			}
		}
	}
	/* MULTI_FUNCTION_SD */

	return 0;
}
static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
{
	struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
	uint32_t val, mac_upper;
	uint8_t i, vnic;

	/* initialize mf_info defaults */
	mf_info->vnics_per_port = 1;
	mf_info->multi_vnics_mode = FALSE;
	mf_info->path_has_ovlan = FALSE;
	mf_info->mf_mode = SINGLE_FUNCTION;

	if (!CHIP_IS_MF_CAP(sc)) {
		return 0;
	}

	if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
		PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!");
		return 1;
	}

	/* get the MF mode (switch dependent / independent / single-function) */

	val = SHMEM_RD(sc, dev_info.shared_feature_config.config);

	switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) {
	case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:

		mac_upper =
		    MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

		/* check for legal upper mac bytes */
		if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
			mf_info->mf_mode = MULTI_FUNCTION_SI;
		} else {
			PMD_DRV_LOG(NOTICE, sc,
				    "Invalid config for Switch Independent mode");
		}

		break;

	case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
	case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:

		/* get outer vlan configuration */
		val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);

		if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
		    FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			mf_info->mf_mode = MULTI_FUNCTION_SD;
		} else {
			PMD_DRV_LOG(NOTICE, sc,
				    "Invalid config for Switch Dependent mode");
		}

		break;

	case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:

		/* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
		break;

	case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:

		/*
		 * Mark MF mode as NIV if MCP version includes NPAR-SD support
		 * and the MAC address is valid.
		 */
		mac_upper =
		    MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

		if ((SHMEM2_HAS(sc, afex_driver_support)) &&
		    (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
			mf_info->mf_mode = MULTI_FUNCTION_AFEX;
		} else {
			PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode");
		}

		break;

	default:

		PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)",
			    (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));

		return 1;
	}

	/* set path mf_mode (which could be different than function mf_mode) */
	if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
		mf_info->path_has_ovlan = TRUE;
	} else if (mf_info->mf_mode == SINGLE_FUNCTION) {
		/*
		 * Decide on path multi vnics mode. If we're not in MF mode and in
		 * 4-port mode, this is good enough to check vnic-0 of the other port
		 * on the same path.
		 */
		if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
			uint8_t other_port = !(PORT_ID(sc) & 1);
			uint8_t abs_func_other_port =
			    (SC_PATH(sc) + (2 * other_port));

			val =
			    MFCFG_RD(sc,
				     func_mf_config
				     [abs_func_other_port].e1hov_tag);

			mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val);
		}
	}

	if (mf_info->mf_mode == SINGLE_FUNCTION) {
		/* invalid MF config */
		if (SC_VN(sc) >= 1) {
			PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode");
			return 1;
		}

		return 0;
	}

	/* get the MF configuration */
	mf_info->mf_config[SC_VN(sc)] =
	    MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);

	switch (mf_info->mf_mode) {
	case MULTI_FUNCTION_SD:

		bnx2x_get_shmem_mf_cfg_info_sd(sc);
		break;

	case MULTI_FUNCTION_SI:

		bnx2x_get_shmem_mf_cfg_info_si(sc);
		break;

	case MULTI_FUNCTION_AFEX:

		bnx2x_get_shmem_mf_cfg_info_niv(sc);
		break;

	default:

		PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)",
			    mf_info->mf_mode);
		return 1;
	}

	/* get the congestion management parameters */

	vnic = 0;
	FOREACH_ABS_FUNC_IN_PORT(sc, i) {
		/* get min/max bw */
		val = MFCFG_RD(sc, func_mf_config[i].config);
		mf_info->min_bw[vnic] =
		    ((val & FUNC_MF_CFG_MIN_BW_MASK) >>
		     FUNC_MF_CFG_MIN_BW_SHIFT);
		mf_info->max_bw[vnic] =
		    ((val & FUNC_MF_CFG_MAX_BW_MASK) >>
		     FUNC_MF_CFG_MAX_BW_SHIFT);
		vnic++;
	}

	return bnx2x_check_valid_mf_cfg(sc);
}
static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
{
	uint32_t port;
	uint32_t mac_hi, mac_lo, val;

	PMD_INIT_FUNC_TRACE(sc);

	port = SC_PORT(sc);
	mac_hi = mac_lo = 0;

	sc->link_params.sc = sc;
	sc->link_params.port = port;

	/* get the hardware config info */
	sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config);
	sc->devinfo.hw_config2 =
	    SHMEM_RD(sc, dev_info.shared_hw_config.config2);

	sc->link_params.hw_led_mode =
	    ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
	     SHARED_HW_CFG_LED_MODE_SHIFT);

	/* get the port feature config */
	sc->port.config =
	    SHMEM_RD(sc, dev_info.port_feature_config[port].config);

	/* get the link params */
	sc->link_params.speed_cap_mask[ELINK_INT_PHY] =
	    SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask)
	    & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
	sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] =
	    SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2)
	    & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;

	/* get the lane config */
	sc->link_params.lane_config =
	    SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);

	/* get the link config */
	val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
	sc->port.link_config[ELINK_INT_PHY] = val;
	sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
	sc->port.link_config[ELINK_EXT_PHY1] =
	    SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);

	/* get the override preemphasis flag and enable it or turn it off */
	val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
		sc->link_params.feature_config_flags |=
		    ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	} else {
		sc->link_params.feature_config_flags &=
		    ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	}

	val = sc->devinfo.bc_ver >> 8;
	if (val < BNX2X_BC_VER) {
		/* for now only warn later we might need to enforce this */
		PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n",
			    BNX2X_BC_VER, val);
	}
	sc->link_params.feature_config_flags |=
	    (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
	    ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY :
	    0;

	sc->link_params.feature_config_flags |=
	    (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
	    ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
	sc->link_params.feature_config_flags |=
	    (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
	    ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
	sc->link_params.feature_config_flags |=
	    (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
	    ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;

	/* get the initial value of the link params */
	sc->link_params.multi_phy_config =
	    SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);

	/* get external phy info */
	sc->port.ext_phy_config =
	    SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

	/* get the multifunction configuration */
	bnx2x_get_mf_cfg_info(sc);

	/* get the mac address */
	if (IS_MF(sc)) {
		mac_hi =
		    MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
		mac_lo =
		    MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
	} else {
		mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
		mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
	}

	if ((mac_lo == 0) && (mac_hi == 0)) {
		*sc->mac_addr_str = 0;
		PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!");
	} else {
		sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8);
		sc->link_params.mac_addr[1] = (uint8_t) (mac_hi);
		sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24);
		sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16);
		sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8);
		sc->link_params.mac_addr[5] = (uint8_t) (mac_lo);
		snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
			 "%02x:%02x:%02x:%02x:%02x:%02x",
			 sc->link_params.mac_addr[0],
			 sc->link_params.mac_addr[1],
			 sc->link_params.mac_addr[2],
			 sc->link_params.mac_addr[3],
			 sc->link_params.mac_addr[4],
			 sc->link_params.mac_addr[5]);
		PMD_DRV_LOG(DEBUG, sc,
			    "Ethernet address: %s", sc->mac_addr_str);
	}

	return 0;
}
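/*
 * Illustrative example with hypothetical values (not part of the original
 * driver comments): mac_upper/mac_lower hold the station address as two
 * big-endian words, so mac_hi = 0x0011 and mac_lo = 0x22334455 unpack via the
 * shifts above to the address 00:11:22:33:44:55.
 */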
static void bnx2x_media_detect(struct bnx2x_softc *sc)
{
	uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc);

	switch (sc->link_params.phy[phy_idx].media_type) {
	case ELINK_ETH_PHY_SFPP_10G_FIBER:
	case ELINK_ETH_PHY_SFP_1G_FIBER:
	case ELINK_ETH_PHY_XFP_FIBER:
	case ELINK_ETH_PHY_KR:
	case ELINK_ETH_PHY_CX4:
		PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media.");
		sc->media = IFM_10G_CX4;
		break;
	case ELINK_ETH_PHY_DA_TWINAX:
		PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media.");
		sc->media = IFM_10G_TWINAX;
		break;
	case ELINK_ETH_PHY_BASE_T:
		PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media.");
		sc->media = IFM_10G_T;
		break;
	case ELINK_ETH_PHY_NOT_PRESENT:
		PMD_DRV_LOG(INFO, sc, "Media not present.");
		sc->media = 0;
		break;
	case ELINK_ETH_PHY_UNSPECIFIED:
	default:
		PMD_DRV_LOG(INFO, sc, "Unknown media!");
		sc->media = 0;
		break;
	}
}

#define GET_FIELD(value, fname) \
	(((value) & (fname##_MASK)) >> (fname##_SHIFT))
#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
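/*
 * Illustrative note (not part of the original driver comments): GET_FIELD()
 * masks a named bit-field out of a register word and shifts it down to bit 0,
 * so IGU_FID() and IGU_VEC() return the function id and vector number encoded
 * in each IGU CAM entry read in bnx2x_get_igu_cam_info() below.
 */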
static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc)
{
	int pfid = SC_FUNC(sc);
	int igu_sb_id;
	uint32_t val;
	uint8_t fid, igu_sb_cnt = 0;

	sc->igu_base_sb = 0xff;

	if (CHIP_INT_MODE_IS_BC(sc)) {
		int vn = SC_VN(sc);

		igu_sb_cnt = sc->igu_sb_cnt;
		sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
				   FP_SB_MAX_E1x);
		sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
				  (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
		return 0;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0;
	     igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) {
		val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
			continue;
		}
		fid = IGU_FID(val);
		if (fid & IGU_FID_ENCODE_IS_PF) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
				continue;
			}
			if (IGU_VEC(val) == 0) {
				/* default status block */
				sc->igu_dsb_id = igu_sb_id;
			} else {
				if (sc->igu_base_sb == 0xff) {
					sc->igu_base_sb = igu_sb_id;
				}
				igu_sb_cnt++;
			}
		}
	}

	/*
	 * Due to new PF resource allocation by MFW T7.4 and above, it's optional
	 * that number of CAM entries will not be equal to the value advertised in
	 * PCI. Driver should use the minimal value of both as the actual status
	 * block count.
	 */
	sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

	if (igu_sb_cnt == 0) {
		PMD_DRV_LOG(ERR, sc, "CAM configuration error");
		return -1;
	}

	return 0;
}
/*
 * Gather various information from the device config space, the device itself,
 * shmem, and the user input.
 */
static int bnx2x_get_device_info(struct bnx2x_softc *sc)
{
	uint32_t val;
	int rc;

	/* get the chip revision (chip metal comes from pci config space) */
	sc->devinfo.chip_id = sc->link_params.chip_id =
	    (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
	     ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
	     (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
	     ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));

	/* force 57811 according to MISC register */
	if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
		if (CHIP_IS_57810(sc)) {
			sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
					       (sc->
						devinfo.chip_id & 0x0000ffff));
		} else if (CHIP_IS_57810_MF(sc)) {
			sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
					       (sc->
						devinfo.chip_id & 0x0000ffff));
		}
		sc->devinfo.chip_id |= 0x1;
	}

	PMD_DRV_LOG(DEBUG, sc,
		    "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)",
		    sc->devinfo.chip_id,
		    ((sc->devinfo.chip_id >> 16) & 0xffff),
		    ((sc->devinfo.chip_id >> 12) & 0xf),
		    ((sc->devinfo.chip_id >> 4) & 0xff),
		    ((sc->devinfo.chip_id >> 0) & 0xf));

	val = (REG_RD(sc, 0x2874) & 0x55);
	if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
		sc->flags |= BNX2X_ONE_PORT_FLAG;
		PMD_DRV_LOG(DEBUG, sc, "single port device");
	}

	/* set the doorbell size */
	sc->doorbell_size = (1 << BNX2X_DB_SHIFT);

	/* determine whether the device is in 2 port or 4 port mode */
	sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE;	/* E1h */
	if (CHIP_IS_E2E3(sc)) {
		/*
		 * Read port4mode_en_ovwr[0]:
		 *   If 1, four port mode is in port4mode_en_ovwr[1].
		 *   If 0, four port mode is in port4mode_en[0].
		 */
		val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
		if (val & 1) {
			val = ((val >> 1) & 1);
		} else {
			val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
		}

		sc->devinfo.chip_port_mode =
		    (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;

		PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? "4" : "2");
	}

	/* get the function and path info for the device */
	bnx2x_get_function_num(sc);

	/* get the shared memory base address */
	sc->devinfo.shmem_base =
	    sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
	sc->devinfo.shmem2_base =
	    REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
			MISC_REG_GENERIC_CR_0));

	if (!sc->devinfo.shmem_base) {
		/* this should ONLY prevent upcoming shmem reads */
		PMD_DRV_LOG(INFO, sc, "MCP not active");
		sc->flags |= BNX2X_NO_MCP_FLAG;
		return 0;
	}

	/* make sure the shared memory contents are valid */
	val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x",
			    val);
		return 0;
	}

	/* get the bootcode version */
	sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
	snprintf(sc->devinfo.bc_ver_str,
		 sizeof(sc->devinfo.bc_ver_str),
		 "%d.%d.%d",
		 ((sc->devinfo.bc_ver >> 24) & 0xff),
		 ((sc->devinfo.bc_ver >> 16) & 0xff),
		 ((sc->devinfo.bc_ver >> 8) & 0xff));
	PMD_DRV_LOG(DEBUG, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str);

	/* get the bootcode shmem address */
	sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc);

	/* clean indirect addresses as they're not used */
	pci_write_long(sc, PCICFG_GRC_ADDRESS, 0);

	REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
	REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
	REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
	REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
	if (CHIP_IS_E1x(sc)) {
		REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
		REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
		REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
		REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
	}

	/* get the nvram size */
	val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
	sc->devinfo.flash_size =
	    (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));

	bnx2x_set_power_state(sc, PCI_PM_D0);
	/* get various configuration parameters from shmem */
	bnx2x_get_shmem_info(sc);

	/* initialize IGU parameters */
	if (CHIP_IS_E1x(sc)) {
		sc->devinfo.int_block = INT_BLOCK_HC;
		sc->igu_dsb_id = DEF_SB_IGU_ID;
		sc->igu_base_sb = 0;
	} else {
		sc->devinfo.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
		bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			int tout = 5000;

			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
			REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
			REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

			while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
				tout--;
				DELAY(1000);
			}

			if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
				PMD_DRV_LOG(NOTICE, sc,
					    "FORCING IGU Normal Mode failed!!!");
				bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
				return -1;
			}
		}

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode");
			sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else {
			PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode");
		}

		rc = bnx2x_get_igu_cam_info(sc);

		bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

		if (rc) {
			return rc;
		}
	}

	/*
	 * Get base FW non-default (fast path) status block ID. This value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(sc)) {
		sc->base_fw_ndsb =
		    ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
	} else {
		/*
		 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
		 * the same queue are indicated on the same IGU SB). So we prefer
		 * FW and IGU SBs to be the same value.
		 */
		sc->base_fw_ndsb = sc->igu_base_sb;
	}

	elink_phy_probe(&sc->link_params);

	return 0;
}
static void
bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
{
	uint32_t cfg_size = 0;
	uint32_t idx;
	uint8_t port = SC_PORT(sc);

	/* aggregation of supported attributes of all external phys */
	sc->port.supported[0] = 0;
	sc->port.supported[1] = 0;

	switch (sc->link_params.num_phys) {
	case 1:
		sc->port.supported[0] =
		    sc->link_params.phy[ELINK_INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		sc->port.supported[0] =
		    sc->link_params.phy[ELINK_EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (sc->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			sc->port.supported[1] =
			    sc->link_params.phy[ELINK_EXT_PHY1].supported;
			sc->port.supported[0] =
			    sc->link_params.phy[ELINK_EXT_PHY2].supported;
		} else {
			sc->port.supported[0] =
			    sc->link_params.phy[ELINK_EXT_PHY1].supported;
			sc->port.supported[1] =
			    sc->link_params.phy[ELINK_EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(sc->port.supported[0] || sc->port.supported[1])) {
		PMD_DRV_LOG(ERR, sc,
			    "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)",
			    SHMEM_RD(sc,
				     dev_info.port_hw_config
				     [port].external_phy_config),
			    SHMEM_RD(sc,
				     dev_info.port_hw_config
				     [port].external_phy_config2));
		return;
	}

	if (CHIP_IS_E3(sc)) {
		sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
	} else {
		switch (switch_cfg) {
		case ELINK_SWITCH_CFG_1G:
			sc->port.phy_addr =
			    REG_RD(sc,
				   NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10);
			break;
		case ELINK_SWITCH_CFG_10G:
			sc->port.phy_addr =
			    REG_RD(sc,
				   NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18);
			break;
		default:
			PMD_DRV_LOG(ERR, sc,
				    "Invalid switch config in "
				    "link_config=0x%08x",
				    sc->port.link_config[0]);
			return;
		}
	}

	PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr);

	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_10baseT_Half;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_10baseT_Full;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_100baseT_Half;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_100baseT_Full;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_1000baseT_Full;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_2500baseX_Full;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_10000baseT_Full;
		}

		if (!(sc->link_params.speed_cap_mask[idx] &
		      PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
			sc->port.supported[idx] &=
			    ~ELINK_SUPPORTED_20000baseKR2_Full;
		}
	}

	PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x",
		    sc->port.supported[0], sc->port.supported[1]);
}
static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
{
	uint32_t link_config;
	uint32_t idx;
	uint32_t cfg_size = 0;

	sc->port.advertising[0] = 0;
	sc->port.advertising[1] = 0;

	switch (sc->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}

	for (idx = 0; idx < cfg_size; idx++) {
		sc->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = sc->port.link_config[idx];

		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_AUTO_NEG;
				sc->port.advertising[idx] |=
				    sc->port.supported[idx];
				if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
				    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833)
					sc->port.advertising[idx] |=
					    (ELINK_SUPPORTED_100baseT_Half |
					     ELINK_SUPPORTED_100baseT_Full);
			} else {
				/* force 10G, no AN */
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_10000;
				sc->port.advertising[idx] |=
				    (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_10baseT_Full) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_10;
				sc->port.advertising[idx] |=
				    (ADVERTISED_10baseT_Full | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_10baseT_Half) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_10;
				sc->link_params.req_duplex[idx] = DUPLEX_HALF;
				sc->port.advertising[idx] |=
				    (ADVERTISED_10baseT_Half | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_100baseT_Full) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_100;
				sc->port.advertising[idx] |=
				    (ADVERTISED_100baseT_Full | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_100baseT_Half) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_100;
				sc->link_params.req_duplex[idx] = DUPLEX_HALF;
				sc->port.advertising[idx] |=
				    (ADVERTISED_100baseT_Half | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_1000baseT_Full) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_1000;
				sc->port.advertising[idx] |=
				    (ADVERTISED_1000baseT_Full | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_2500baseX_Full) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_2500;
				sc->port.advertising[idx] |=
				    (ADVERTISED_2500baseX_Full | ADVERTISED_TP);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
			if (sc->port.supported[idx] &
			    ELINK_SUPPORTED_10000baseT_Full) {
				sc->link_params.req_line_speed[idx] =
				    ELINK_SPEED_10000;
				sc->port.advertising[idx] |=
				    (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
			} else {
				PMD_DRV_LOG(ERR, sc,
					    "Invalid NVRAM config link_config=0x%08x "
					    "speed_cap_mask=0x%08x",
					    link_config,
					    sc->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_20G:
			sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
			break;

		default:
			PMD_DRV_LOG(ERR, sc,
				    "Invalid NVRAM config link_config=0x%08x "
				    "speed_cap_mask=0x%08x", link_config,
				    sc->link_params.speed_cap_mask[idx]);
			sc->link_params.req_line_speed[idx] =
			    ELINK_SPEED_AUTO_NEG;
			sc->port.advertising[idx] = sc->port.supported[idx];
			break;
		}

		sc->link_params.req_flow_ctrl[idx] =
		    (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);

		if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
			if (!(sc->port.supported[idx] &
			      ELINK_SUPPORTED_Autoneg)) {
				sc->link_params.req_flow_ctrl[idx] =
				    ELINK_FLOW_CTRL_NONE;
			} else {
				bnx2x_set_requested_fc(sc);
			}
		}
	}
}
8813 static void bnx2x_get_phy_info(struct bnx2x_softc
*sc
)
8815 uint8_t port
= SC_PORT(sc
);
8818 PMD_INIT_FUNC_TRACE(sc
);
8820 /* shmem data already read in bnx2x_get_shmem_info() */
8822 bnx2x_link_settings_supported(sc
, sc
->link_params
.switch_cfg
);
8823 bnx2x_link_settings_requested(sc
);
8825 /* configure link feature according to nvram value */
8827 (((SHMEM_RD(sc
, dev_info
.port_feature_config
[port
].eee_power_mode
))
8828 & PORT_FEAT_CFG_EEE_POWER_MODE_MASK
) >>
8829 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT
);
8830 if (eee_mode
!= PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED
) {
8831 sc
->link_params
.eee_mode
= (ELINK_EEE_MODE_ADV_LPI
|
8832 ELINK_EEE_MODE_ENABLE_LPI
|
8833 ELINK_EEE_MODE_OUTPUT_TIME
);
8835 sc
->link_params
.eee_mode
= 0;
8838 /* get the media type */
8839 bnx2x_media_detect(sc
);
8842 static void bnx2x_set_modes_bitmap(struct bnx2x_softc
*sc
)
8844 uint32_t flags
= MODE_ASIC
| MODE_PORT2
;
8846 if (CHIP_IS_E2(sc
)) {
8848 } else if (CHIP_IS_E3(sc
)) {
8850 if (CHIP_REV(sc
) == CHIP_REV_Ax
) {
8851 flags
|= MODE_E3_A0
;
8852 } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */
8854 flags
|= MODE_E3_B0
| MODE_COS3
;
8860 switch (sc
->devinfo
.mf_info
.mf_mode
) {
8861 case MULTI_FUNCTION_SD
:
8862 flags
|= MODE_MF_SD
;
8864 case MULTI_FUNCTION_SI
:
8865 flags
|= MODE_MF_SI
;
8867 case MULTI_FUNCTION_AFEX
:
8868 flags
|= MODE_MF_AFEX
;
8875 #if defined(__LITTLE_ENDIAN)
8876 flags
|= MODE_LITTLE_ENDIAN
;
8877 #else /* __BIG_ENDIAN */
8878 flags
|= MODE_BIG_ENDIAN
;
8881 INIT_MODE_FLAGS(sc
) = flags
;
8884 int bnx2x_alloc_hsi_mem(struct bnx2x_softc
*sc
)
8886 struct bnx2x_fastpath
*fp
;
8891 /************************/
8892 /* DEFAULT STATUS BLOCK */
8893 /************************/
8895 if (bnx2x_dma_alloc(sc
, sizeof(struct host_sp_status_block
),
8896 &sc
->def_sb_dma
, "def_sb",
8897 RTE_CACHE_LINE_SIZE
) != 0) {
8902 (struct host_sp_status_block
*)sc
->def_sb_dma
.vaddr
;
8907 if (bnx2x_dma_alloc(sc
, BNX2X_PAGE_SIZE
,
8908 &sc
->eq_dma
, "ev_queue",
8909 RTE_CACHE_LINE_SIZE
) != 0) {
8914 sc
->eq
= (union event_ring_elem
*)sc
->eq_dma
.vaddr
;
8920 if (bnx2x_dma_alloc(sc
, sizeof(struct bnx2x_slowpath
),
8922 RTE_CACHE_LINE_SIZE
) != 0) {
8928 sc
->sp
= (struct bnx2x_slowpath
*)sc
->sp_dma
.vaddr
;
8930 /*******************/
8931 /* SLOW PATH QUEUE */
8932 /*******************/
8934 if (bnx2x_dma_alloc(sc
, BNX2X_PAGE_SIZE
,
8935 &sc
->spq_dma
, "sp_queue",
8936 RTE_CACHE_LINE_SIZE
) != 0) {
8943 sc
->spq
= (struct eth_spe
*)sc
->spq_dma
.vaddr
;
8945 /***************************/
8946 /* FW DECOMPRESSION BUFFER */
8947 /***************************/
8949 if (bnx2x_dma_alloc(sc
, FW_BUF_SIZE
, &sc
->gz_buf_dma
,
8950 "fw_buf", RTE_CACHE_LINE_SIZE
) != 0) {
8958 sc
->gz_buf
= (void *)sc
->gz_buf_dma
.vaddr
;
8965 /* allocate DMA memory for each fastpath structure */
8966 for (i
= 0; i
< sc
->num_queues
; i
++) {
8971 /*******************/
8972 /* FP STATUS BLOCK */
8973 /*******************/
8975 snprintf(buf
, sizeof(buf
), "fp_%d_sb", i
);
8976 if (bnx2x_dma_alloc(sc
, sizeof(union bnx2x_host_hc_status_block
),
8977 &fp
->sb_dma
, buf
, RTE_CACHE_LINE_SIZE
) != 0) {
8978 PMD_DRV_LOG(NOTICE
, sc
, "Failed to alloc %s", buf
);
8981 if (CHIP_IS_E2E3(sc
)) {
8982 fp
->status_block
.e2_sb
=
8983 (struct host_hc_status_block_e2
*)
8986 fp
->status_block
.e1x_sb
=
8987 (struct host_hc_status_block_e1x
*)
8996 void bnx2x_free_hsi_mem(struct bnx2x_softc
*sc
)
8998 struct bnx2x_fastpath
*fp
;
9001 for (i
= 0; i
< sc
->num_queues
; i
++) {
9004 /*******************/
9005 /* FP STATUS BLOCK */
9006 /*******************/
9008 memset(&fp
->status_block
, 0, sizeof(fp
->status_block
));
9011 /***************************/
9012 /* FW DECOMPRESSION BUFFER */
9013 /***************************/
9017 /*******************/
9018 /* SLOW PATH QUEUE */
9019 /*******************/
9035 /************************/
9036 /* DEFAULT STATUS BLOCK */
9037 /************************/
9044 * Previous driver DMAE transaction may have occurred when pre-boot stage
9045 * ended and boot began. This would invalidate the addresses of the
9046 * transaction, resulting in was-error bit set in the PCI causing all
9047 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
9048 * the interrupt which detected this from the pglueb and the was-done bit
9050 static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc
*sc
)
9054 if (!CHIP_IS_E1x(sc
)) {
9055 val
= REG_RD(sc
, PGLUE_B_REG_PGLUE_B_INT_STS
);
9056 if (val
& PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN
) {
9057 REG_WR(sc
, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR
,
9063 static int bnx2x_prev_mcp_done(struct bnx2x_softc
*sc
)
9065 uint32_t rc
= bnx2x_fw_command(sc
, DRV_MSG_CODE_UNLOAD_DONE
,
9066 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET
);
9068 PMD_DRV_LOG(NOTICE
, sc
, "MCP response failure, aborting");
9075 static struct bnx2x_prev_list_node
*bnx2x_prev_path_get_entry(struct bnx2x_softc
*sc
)
9077 struct bnx2x_prev_list_node
*tmp
;
9079 LIST_FOREACH(tmp
, &bnx2x_prev_list
, node
) {
9080 if ((sc
->pcie_bus
== tmp
->bus
) &&
9081 (sc
->pcie_device
== tmp
->slot
) &&
9082 (SC_PATH(sc
) == tmp
->path
)) {
9090 static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc
*sc
)
9092 struct bnx2x_prev_list_node
*tmp
;
9095 rte_spinlock_lock(&bnx2x_prev_mtx
);
9097 tmp
= bnx2x_prev_path_get_entry(sc
);
9100 PMD_DRV_LOG(DEBUG
, sc
,
9101 "Path %d/%d/%d was marked by AER",
9102 sc
->pcie_bus
, sc
->pcie_device
, SC_PATH(sc
));
9105 PMD_DRV_LOG(DEBUG
, sc
,
9106 "Path %d/%d/%d was already cleaned from previous drivers",
9107 sc
->pcie_bus
, sc
->pcie_device
, SC_PATH(sc
));
9111 rte_spinlock_unlock(&bnx2x_prev_mtx
);
9116 static int bnx2x_prev_mark_path(struct bnx2x_softc
*sc
, uint8_t after_undi
)
9118 struct bnx2x_prev_list_node
*tmp
;
9120 rte_spinlock_lock(&bnx2x_prev_mtx
);
9122 /* Check whether the entry for this path already exists */
9123 tmp
= bnx2x_prev_path_get_entry(sc
);
9126 PMD_DRV_LOG(DEBUG
, sc
,
9127 "Re-marking AER in path %d/%d/%d",
9128 sc
->pcie_bus
, sc
->pcie_device
, SC_PATH(sc
));
9130 PMD_DRV_LOG(DEBUG
, sc
,
9131 "Removing AER indication from path %d/%d/%d",
9132 sc
->pcie_bus
, sc
->pcie_device
, SC_PATH(sc
));
9136 rte_spinlock_unlock(&bnx2x_prev_mtx
);
9140 rte_spinlock_unlock(&bnx2x_prev_mtx
);
9142 /* Create an entry for this path and add it */
9143 tmp
= rte_malloc("", sizeof(struct bnx2x_prev_list_node
),
9144 RTE_CACHE_LINE_SIZE
);
9146 PMD_DRV_LOG(NOTICE
, sc
, "Failed to allocate 'bnx2x_prev_list_node'");
9150 tmp
->bus
= sc
->pcie_bus
;
9151 tmp
->slot
= sc
->pcie_device
;
9152 tmp
->path
= SC_PATH(sc
);
9154 tmp
->undi
= after_undi
? (1 << SC_PORT(sc
)) : 0;
9156 rte_spinlock_lock(&bnx2x_prev_mtx
);
9158 LIST_INSERT_HEAD(&bnx2x_prev_list
, tmp
, node
);
9160 rte_spinlock_unlock(&bnx2x_prev_mtx
);
9165 static int bnx2x_do_flr(struct bnx2x_softc
*sc
)
9169 /* only E2 and onwards support FLR */
9170 if (CHIP_IS_E1x(sc
)) {
9171 PMD_DRV_LOG(WARNING
, sc
, "FLR not supported in E1H");
9175 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
9176 if (sc
->devinfo
.bc_ver
< REQ_BC_VER_4_INITIATE_FLR
) {
9177 PMD_DRV_LOG(WARNING
, sc
,
9178 "FLR not supported by BC_VER: 0x%08x",
9179 sc
->devinfo
.bc_ver
);
9183 /* Wait for Transaction Pending bit clean */
9184 for (i
= 0; i
< 4; i
++) {
9186 DELAY(((1 << (i
- 1)) * 100) * 1000);
9189 if (!bnx2x_is_pcie_pending(sc
)) {
9194 PMD_DRV_LOG(NOTICE
, sc
, "PCIE transaction is not cleared, "
9195 "proceeding with reset anyway");
9198 bnx2x_fw_command(sc
, DRV_MSG_CODE_INITIATE_FLR
, 0);
9203 struct bnx2x_mac_vals
{
9211 uint32_t bmac_val
[2];
9215 bnx2x_prev_unload_close_mac(struct bnx2x_softc
*sc
, struct bnx2x_mac_vals
*vals
)
9217 uint32_t val
, base_addr
, offset
, mask
, reset_reg
;
9218 uint8_t mac_stopped
= FALSE
;
9219 uint8_t port
= SC_PORT(sc
);
9220 uint32_t wb_data
[2];
9222 /* reset addresses as they also mark which values were changed */
9223 vals
->bmac_addr
= 0;
9224 vals
->umac_addr
= 0;
9225 vals
->xmac_addr
= 0;
9226 vals
->emac_addr
= 0;
9228 reset_reg
= REG_RD(sc
, MISC_REG_RESET_REG_2
);
9230 if (!CHIP_IS_E3(sc
)) {
9231 val
= REG_RD(sc
, NIG_REG_BMAC0_REGS_OUT_EN
+ port
* 4);
9232 mask
= MISC_REGISTERS_RESET_REG_2_RST_BMAC0
<< port
;
9233 if ((mask
& reset_reg
) && val
) {
9234 base_addr
= SC_PORT(sc
) ? NIG_REG_INGRESS_BMAC1_MEM
9235 : NIG_REG_INGRESS_BMAC0_MEM
;
9236 offset
= CHIP_IS_E2(sc
) ? BIGMAC2_REGISTER_BMAC_CONTROL
9237 : BIGMAC_REGISTER_BMAC_CONTROL
;
9240 * use rd/wr since we cannot use dmae. This is safe
9241 * since MCP won't access the bus due to the request
9242 * to unload, and no function on the path can be
9243 * loaded at this time.
9245 wb_data
[0] = REG_RD(sc
, base_addr
+ offset
);
9246 wb_data
[1] = REG_RD(sc
, base_addr
+ offset
+ 0x4);
9247 vals
->bmac_addr
= base_addr
+ offset
;
9248 vals
->bmac_val
[0] = wb_data
[0];
9249 vals
->bmac_val
[1] = wb_data
[1];
9250 wb_data
[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE
;
9251 REG_WR(sc
, vals
->bmac_addr
, wb_data
[0]);
9252 REG_WR(sc
, vals
->bmac_addr
+ 0x4, wb_data
[1]);
9255 vals
->emac_addr
= NIG_REG_NIG_EMAC0_EN
+ SC_PORT(sc
) * 4;
9256 vals
->emac_val
= REG_RD(sc
, vals
->emac_addr
);
9257 REG_WR(sc
, vals
->emac_addr
, 0);
9260 if (reset_reg
& MISC_REGISTERS_RESET_REG_2_XMAC
) {
9261 base_addr
= SC_PORT(sc
) ? GRCBASE_XMAC1
: GRCBASE_XMAC0
;
9262 val
= REG_RD(sc
, base_addr
+ XMAC_REG_PFC_CTRL_HI
);
9263 REG_WR(sc
, base_addr
+ XMAC_REG_PFC_CTRL_HI
,
9265 REG_WR(sc
, base_addr
+ XMAC_REG_PFC_CTRL_HI
,
9267 vals
->xmac_addr
= base_addr
+ XMAC_REG_CTRL
;
9268 vals
->xmac_val
= REG_RD(sc
, vals
->xmac_addr
);
9269 REG_WR(sc
, vals
->xmac_addr
, 0);
9273 mask
= MISC_REGISTERS_RESET_REG_2_UMAC0
<< port
;
9274 if (mask
& reset_reg
) {
9275 base_addr
= SC_PORT(sc
) ? GRCBASE_UMAC1
: GRCBASE_UMAC0
;
9276 vals
->umac_addr
= base_addr
+ UMAC_REG_COMMAND_CONFIG
;
9277 vals
->umac_val
= REG_RD(sc
, vals
->umac_addr
);
9278 REG_WR(sc
, vals
->umac_addr
, 0);
9288 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9289 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9290 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9291 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9294 bnx2x_prev_unload_undi_inc(struct bnx2x_softc
*sc
, uint8_t port
, uint8_t inc
)
9297 uint32_t tmp_reg
= REG_RD(sc
, BNX2X_PREV_UNDI_PROD_ADDR(port
));
9299 rcq
= BNX2X_PREV_UNDI_RCQ(tmp_reg
) + inc
;
9300 bd
= BNX2X_PREV_UNDI_BD(tmp_reg
) + inc
;
9302 tmp_reg
= BNX2X_PREV_UNDI_PROD(rcq
, bd
);
9303 REG_WR(sc
, BNX2X_PREV_UNDI_PROD_ADDR(port
), tmp_reg
);
9306 static int bnx2x_prev_unload_common(struct bnx2x_softc
*sc
)
9308 uint32_t reset_reg
, tmp_reg
= 0, rc
;
9309 uint8_t prev_undi
= FALSE
;
9310 struct bnx2x_mac_vals mac_vals
;
9311 uint32_t timer_count
= 1000;
9315 * It is possible a previous function received 'common' answer,
9316 * but hasn't loaded yet, therefore creating a scenario of
9317 * multiple functions receiving 'common' on the same path.
9319 memset(&mac_vals
, 0, sizeof(mac_vals
));
9321 if (bnx2x_prev_is_path_marked(sc
)) {
9322 return bnx2x_prev_mcp_done(sc
);
9325 reset_reg
= REG_RD(sc
, MISC_REG_RESET_REG_1
);
9327 /* Reset should be performed after BRB is emptied */
9328 if (reset_reg
& MISC_REGISTERS_RESET_REG_1_RST_BRB1
) {
9329 /* Close the MAC Rx to prevent BRB from filling up */
9330 bnx2x_prev_unload_close_mac(sc
, &mac_vals
);
9332 /* close LLH filters towards the BRB */
9333 elink_set_rx_filter(&sc
->link_params
, 0);
9336 * Check if the UNDI driver was previously loaded.
9337 * UNDI driver initializes CID offset for normal bell to 0x7
9339 if (reset_reg
& MISC_REGISTERS_RESET_REG_1_RST_DORQ
) {
9340 tmp_reg
= REG_RD(sc
, DORQ_REG_NORM_CID_OFST
);
9341 if (tmp_reg
== 0x7) {
9342 PMD_DRV_LOG(DEBUG
, sc
, "UNDI previously loaded");
9344 /* clear the UNDI indication */
9345 REG_WR(sc
, DORQ_REG_NORM_CID_OFST
, 0);
9346 /* clear possible idle check errors */
9347 REG_RD(sc
, NIG_REG_NIG_INT_STS_CLR_0
);
9351 /* wait until BRB is empty */
9352 tmp_reg
= REG_RD(sc
, BRB1_REG_NUM_OF_FULL_BLOCKS
);
9353 while (timer_count
) {
9356 tmp_reg
= REG_RD(sc
, BRB1_REG_NUM_OF_FULL_BLOCKS
);
9361 PMD_DRV_LOG(DEBUG
, sc
, "BRB still has 0x%08x", tmp_reg
);
9363 /* reset timer as long as BRB actually gets emptied */
9364 if (prev_brb
> tmp_reg
) {
9370 /* If UNDI resides in memory, manually increment it */
9372 bnx2x_prev_unload_undi_inc(sc
, SC_PORT(sc
), 1);
9379 PMD_DRV_LOG(NOTICE
, sc
, "Failed to empty BRB");
9383 /* No packets are in the pipeline, path is ready for reset */
9384 bnx2x_reset_common(sc
);
9386 if (mac_vals
.xmac_addr
) {
9387 REG_WR(sc
, mac_vals
.xmac_addr
, mac_vals
.xmac_val
);
9389 if (mac_vals
.umac_addr
) {
9390 REG_WR(sc
, mac_vals
.umac_addr
, mac_vals
.umac_val
);
9392 if (mac_vals
.emac_addr
) {
9393 REG_WR(sc
, mac_vals
.emac_addr
, mac_vals
.emac_val
);
9395 if (mac_vals
.bmac_addr
) {
9396 REG_WR(sc
, mac_vals
.bmac_addr
, mac_vals
.bmac_val
[0]);
9397 REG_WR(sc
, mac_vals
.bmac_addr
+ 4, mac_vals
.bmac_val
[1]);
9400 rc
= bnx2x_prev_mark_path(sc
, prev_undi
);
9402 bnx2x_prev_mcp_done(sc
);
9406 return bnx2x_prev_mcp_done(sc
);
9409 static int bnx2x_prev_unload_uncommon(struct bnx2x_softc
*sc
)
9413 /* Test if previous unload process was already finished for this path */
9414 if (bnx2x_prev_is_path_marked(sc
)) {
9415 return bnx2x_prev_mcp_done(sc
);
9419 * If function has FLR capabilities, and existing FW version matches
9420 * the one required, then FLR will be sufficient to clean any residue
9421 * left by previous driver
9423 rc
= bnx2x_nic_load_analyze_req(sc
, FW_MSG_CODE_DRV_LOAD_FUNCTION
);
9425 /* fw version is good */
9426 rc
= bnx2x_do_flr(sc
);
9430 /* FLR was performed */
9434 PMD_DRV_LOG(INFO
, sc
, "Could not FLR");
9436 /* Close the MCP request, return failure */
9437 rc
= bnx2x_prev_mcp_done(sc
);
9439 rc
= BNX2X_PREV_WAIT_NEEDED
;
9445 static int bnx2x_prev_unload(struct bnx2x_softc
*sc
)
9447 int time_counter
= 10;
9448 uint32_t fw
, hw_lock_reg
, hw_lock_val
;
9451 PMD_INIT_FUNC_TRACE(sc
);
9454 * Clear HW from errors which may have resulted from an interrupted
9457 bnx2x_prev_interrupted_dmae(sc
);
9459 /* Release previously held locks */
9460 hw_lock_reg
= (SC_FUNC(sc
) <= 5) ?
9461 (MISC_REG_DRIVER_CONTROL_1
+ SC_FUNC(sc
) * 8) :
9462 (MISC_REG_DRIVER_CONTROL_7
+ (SC_FUNC(sc
) - 6) * 8);
9464 hw_lock_val
= (REG_RD(sc
, hw_lock_reg
));
9466 if (hw_lock_val
& HW_LOCK_RESOURCE_NVRAM
) {
9467 PMD_DRV_LOG(DEBUG
, sc
, "Releasing previously held NVRAM lock\n");
9468 REG_WR(sc
, MCP_REG_MCPR_NVM_SW_ARB
,
9469 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1
<< SC_PORT(sc
)));
9471 PMD_DRV_LOG(DEBUG
, sc
, "Releasing previously held HW lock\n");
9472 REG_WR(sc
, hw_lock_reg
, 0xffffffff);
9475 if (MCPR_ACCESS_LOCK_LOCK
& REG_RD(sc
, MCP_REG_MCPR_ACCESS_LOCK
)) {
9476 PMD_DRV_LOG(DEBUG
, sc
, "Releasing previously held ALR\n");
9477 REG_WR(sc
, MCP_REG_MCPR_ACCESS_LOCK
, 0);
9481 /* Lock MCP using an unload request */
9482 fw
= bnx2x_fw_command(sc
, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
, 0);
9484 PMD_DRV_LOG(NOTICE
, sc
, "MCP response failure, aborting");
9489 if (fw
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
9490 rc
= bnx2x_prev_unload_common(sc
);
9494 /* non-common reply from MCP might require looping */
9495 rc
= bnx2x_prev_unload_uncommon(sc
);
9496 if (rc
!= BNX2X_PREV_WAIT_NEEDED
) {
9501 } while (--time_counter
);
9503 if (!time_counter
|| rc
) {
9504 PMD_DRV_LOG(NOTICE
, sc
, "Failed to unload previous driver!");
9512 bnx2x_dcbx_set_state(struct bnx2x_softc
*sc
, uint8_t dcb_on
, uint32_t dcbx_enabled
)
9514 if (!CHIP_IS_E1x(sc
)) {
9515 sc
->dcb_state
= dcb_on
;
9516 sc
->dcbx_enabled
= dcbx_enabled
;
9518 sc
->dcb_state
= FALSE
;
9519 sc
->dcbx_enabled
= BNX2X_DCBX_ENABLED_INVALID
;
9521 PMD_DRV_LOG(DEBUG
, sc
,
9522 "DCB state [%s:%s]",
9523 dcb_on
? "ON" : "OFF",
9524 (dcbx_enabled
== BNX2X_DCBX_ENABLED_OFF
) ? "user-mode" :
9526 BNX2X_DCBX_ENABLED_ON_NEG_OFF
) ? "on-chip static"
9528 BNX2X_DCBX_ENABLED_ON_NEG_ON
) ?
9529 "on-chip with negotiation" : "invalid");
9532 static int bnx2x_set_qm_cid_count(struct bnx2x_softc
*sc
)
9534 int cid_count
= BNX2X_L2_MAX_CID(sc
);
9536 if (CNIC_SUPPORT(sc
)) {
9537 cid_count
+= CNIC_CID_MAX
;
9540 return roundup(cid_count
, QM_CID_ROUND
);
9543 static void bnx2x_init_multi_cos(struct bnx2x_softc
*sc
)
9547 uint32_t pri_map
= 0;
9549 for (pri
= 0; pri
< BNX2X_MAX_PRIORITY
; pri
++) {
9550 cos
= ((pri_map
& (0xf << (pri
* 4))) >> (pri
* 4));
9551 if (cos
< sc
->max_cos
) {
9552 sc
->prio_to_cos
[pri
] = cos
;
9554 PMD_DRV_LOG(WARNING
, sc
,
9555 "Invalid COS %d for priority %d "
9556 "(max COS is %d), setting to 0", cos
, pri
,
9558 sc
->prio_to_cos
[pri
] = 0;
9563 static int bnx2x_pci_get_caps(struct bnx2x_softc
*sc
)
9570 struct bnx2x_pci_cap
*cap
;
9572 cap
= sc
->pci_caps
= rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap
),
9573 RTE_CACHE_LINE_SIZE
);
9575 PMD_DRV_LOG(NOTICE
, sc
, "Failed to allocate memory");
9580 pci_read(sc
, PCI_STATUS
, &status
, 2);
9581 if (!(status
& PCI_STATUS_CAP_LIST
)) {
9583 pci_read(sc
, PCIR_STATUS
, &status
, 2);
9584 if (!(status
& PCIM_STATUS_CAPPRESENT
)) {
9586 PMD_DRV_LOG(NOTICE
, sc
, "PCIe capability reading failed");
9591 pci_read(sc
, PCI_CAPABILITY_LIST
, &pci_cap
.next
, 1);
9593 pci_read(sc
, PCIR_CAP_PTR
, &pci_cap
.next
, 1);
9595 while (pci_cap
.next
) {
9596 cap
->addr
= pci_cap
.next
& ~3;
9597 pci_read(sc
, pci_cap
.next
& ~3, &pci_cap
, 2);
9598 if (pci_cap
.id
== 0xff)
9600 cap
->id
= pci_cap
.id
;
9601 cap
->type
= BNX2X_PCI_CAP
;
9602 cap
->next
= rte_zmalloc("pci_cap",
9603 sizeof(struct bnx2x_pci_cap
),
9604 RTE_CACHE_LINE_SIZE
);
9606 PMD_DRV_LOG(NOTICE
, sc
, "Failed to allocate memory");
9615 static void bnx2x_init_rte(struct bnx2x_softc
*sc
)
9618 sc
->max_tx_queues
= min(BNX2X_VF_MAX_QUEUES_PER_VF
,
9620 sc
->max_rx_queues
= min(BNX2X_VF_MAX_QUEUES_PER_VF
,
9623 sc
->max_rx_queues
= BNX2X_MAX_RSS_COUNT(sc
);
9624 sc
->max_tx_queues
= sc
->max_rx_queues
;
9628 #define FW_HEADER_LEN 104
9629 #define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw"
9630 #define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw"
9632 void bnx2x_load_firmware(struct bnx2x_softc
*sc
)
9638 fwname
= sc
->devinfo
.device_id
== CHIP_NUM_57711
9639 ? FW_NAME_57711
: FW_NAME_57810
;
9640 f
= open(fwname
, O_RDONLY
);
9642 PMD_DRV_LOG(NOTICE
, sc
, "Can't open firmware file");
9646 if (fstat(f
, &st
) < 0) {
9647 PMD_DRV_LOG(NOTICE
, sc
, "Can't stat firmware file");
9652 sc
->firmware
= rte_zmalloc("bnx2x_fw", st
.st_size
, RTE_CACHE_LINE_SIZE
);
9653 if (!sc
->firmware
) {
9654 PMD_DRV_LOG(NOTICE
, sc
, "Can't allocate memory for firmware");
9659 if (read(f
, sc
->firmware
, st
.st_size
) != st
.st_size
) {
9660 PMD_DRV_LOG(NOTICE
, sc
, "Can't read firmware data");
9666 sc
->fw_len
= st
.st_size
;
9667 if (sc
->fw_len
< FW_HEADER_LEN
) {
9668 PMD_DRV_LOG(NOTICE
, sc
,
9669 "Invalid fw size: %" PRIu64
, sc
->fw_len
);
9672 PMD_DRV_LOG(DEBUG
, sc
, "fw_len = %" PRIu64
, sc
->fw_len
);
9676 bnx2x_data_to_init_ops(uint8_t * data
, struct raw_op
*dst
, uint32_t len
)
9678 uint32_t *src
= (uint32_t *) data
;
9681 for (i
= 0, j
= 0; i
< len
/ 8; ++i
, j
+= 2) {
9682 tmp
= rte_be_to_cpu_32(src
[j
]);
9683 dst
[i
].op
= (tmp
>> 24) & 0xFF;
9684 dst
[i
].offset
= tmp
& 0xFFFFFF;
9685 dst
[i
].raw_data
= rte_be_to_cpu_32(src
[j
+ 1]);
9690 bnx2x_data_to_init_offsets(uint8_t * data
, uint16_t * dst
, uint32_t len
)
9692 uint16_t *src
= (uint16_t *) data
;
9695 for (i
= 0; i
< len
/ 2; ++i
)
9696 dst
[i
] = rte_be_to_cpu_16(src
[i
]);
9699 static void bnx2x_data_to_init_data(uint8_t * data
, uint32_t * dst
, uint32_t len
)
9701 uint32_t *src
= (uint32_t *) data
;
9704 for (i
= 0; i
< len
/ 4; ++i
)
9705 dst
[i
] = rte_be_to_cpu_32(src
[i
]);
9708 static void bnx2x_data_to_iro_array(uint8_t * data
, struct iro
*dst
, uint32_t len
)
9710 uint32_t *src
= (uint32_t *) data
;
9713 for (i
= 0, j
= 0; i
< len
/ sizeof(struct iro
); ++i
, ++j
) {
9714 dst
[i
].base
= rte_be_to_cpu_32(src
[j
++]);
9715 tmp
= rte_be_to_cpu_32(src
[j
]);
9716 dst
[i
].m1
= (tmp
>> 16) & 0xFFFF;
9717 dst
[i
].m2
= tmp
& 0xFFFF;
9719 tmp
= rte_be_to_cpu_32(src
[j
]);
9720 dst
[i
].m3
= (tmp
>> 16) & 0xFFFF;
9721 dst
[i
].size
= tmp
& 0xFFFF;
9726 * Device attach function.
9728 * Allocates device resources, performs secondary chip identification, and
9729 * initializes driver instance variables. This function is called from driver
9730 * load after a successful probe.
9733 * 0 = Success, >0 = Failure
9735 int bnx2x_attach(struct bnx2x_softc
*sc
)
9739 PMD_DRV_LOG(DEBUG
, sc
, "Starting attach...");
9741 rc
= bnx2x_pci_get_caps(sc
);
9743 PMD_DRV_LOG(NOTICE
, sc
, "PCIe caps reading was failed");
9747 sc
->state
= BNX2X_STATE_CLOSED
;
9749 pci_write_long(sc
, PCICFG_GRC_ADDRESS
, PCICFG_VENDOR_ID_OFFSET
);
9751 sc
->igu_base_addr
= IS_VF(sc
) ? PXP_VF_ADDR_IGU_START
: BAR_IGU_INTMEM
;
9753 /* get PCI capabilites */
9754 bnx2x_probe_pci_caps(sc
);
9756 if (sc
->devinfo
.pcie_msix_cap_reg
!= 0) {
9759 (sc
->devinfo
.pcie_msix_cap_reg
+ PCIR_MSIX_CTRL
), &val
,
9761 sc
->igu_sb_cnt
= (val
& PCIM_MSIXCTRL_TABLE_SIZE
) + 1;
9766 /* Init RTE stuff */
9770 /* Enable internal target-read (in case we are probed after PF
9771 * FLR). Must be done prior to any BAR read access. Only for
9774 if (!CHIP_IS_E1x(sc
)) {
9775 REG_WR(sc
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ
,
9780 /* get device info and set params */
9781 if (bnx2x_get_device_info(sc
) != 0) {
9782 PMD_DRV_LOG(NOTICE
, sc
, "getting device info");
9786 /* get phy settings from shmem and 'and' against admin settings */
9787 bnx2x_get_phy_info(sc
);
9789 /* Left mac of VF unfilled, PF should set it for VF */
9790 memset(sc
->link_params
.mac_addr
, 0, ETHER_ADDR_LEN
);
9795 /* set the default MTU (changed via ifconfig) */
9796 sc
->mtu
= ETHER_MTU
;
9798 bnx2x_set_modes_bitmap(sc
);
9800 /* need to reset chip if UNDI was active */
9801 if (IS_PF(sc
) && !BNX2X_NOMCP(sc
)) {
9804 (SHMEM_RD(sc
, func_mb
[SC_FW_MB_IDX(sc
)].drv_mb_header
) &
9805 DRV_MSG_SEQ_NUMBER_MASK
);
9806 PMD_DRV_LOG(DEBUG
, sc
, "prev unload fw_seq 0x%04x",
9808 bnx2x_prev_unload(sc
);
9811 bnx2x_dcbx_set_state(sc
, FALSE
, BNX2X_DCBX_ENABLED_OFF
);
9813 /* calculate qm_cid_count */
9814 sc
->qm_cid_count
= bnx2x_set_qm_cid_count(sc
);
9817 bnx2x_init_multi_cos(sc
);
9823 bnx2x_igu_ack_sb(struct bnx2x_softc
*sc
, uint8_t igu_sb_id
, uint8_t segment
,
9824 uint16_t index
, uint8_t op
, uint8_t update
)
9826 uint32_t igu_addr
= sc
->igu_base_addr
;
9827 igu_addr
+= (IGU_CMD_INT_ACK_BASE
+ igu_sb_id
) * 8;
9828 bnx2x_igu_ack_sb_gen(sc
, segment
, index
, op
, update
, igu_addr
);
9832 bnx2x_ack_sb(struct bnx2x_softc
*sc
, uint8_t igu_sb_id
, uint8_t storm
,
9833 uint16_t index
, uint8_t op
, uint8_t update
)
9835 if (unlikely(sc
->devinfo
.int_block
== INT_BLOCK_HC
))
9836 bnx2x_hc_ack_sb(sc
, igu_sb_id
, storm
, index
, op
, update
);
9839 if (CHIP_INT_MODE_IS_BC(sc
)) {
9841 } else if (igu_sb_id
!= sc
->igu_dsb_id
) {
9842 segment
= IGU_SEG_ACCESS_DEF
;
9843 } else if (storm
== ATTENTION_ID
) {
9844 segment
= IGU_SEG_ACCESS_ATTN
;
9846 segment
= IGU_SEG_ACCESS_DEF
;
9848 bnx2x_igu_ack_sb(sc
, igu_sb_id
, segment
, index
, op
, update
);
9853 bnx2x_igu_clear_sb_gen(struct bnx2x_softc
*sc
, uint8_t func
, uint8_t idu_sb_id
,
9856 uint32_t data
, ctl
, cnt
= 100;
9857 uint32_t igu_addr_data
= IGU_REG_COMMAND_REG_32LSB_DATA
;
9858 uint32_t igu_addr_ctl
= IGU_REG_COMMAND_REG_CTRL
;
9859 uint32_t igu_addr_ack
= IGU_REG_CSTORM_TYPE_0_SB_CLEANUP
+
9860 (idu_sb_id
/ 32) * 4;
9861 uint32_t sb_bit
= 1 << (idu_sb_id
% 32);
9862 uint32_t func_encode
= func
|
9863 (is_pf
? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT
;
9864 uint32_t addr_encode
= IGU_CMD_E2_PROD_UPD_BASE
+ idu_sb_id
;
9866 /* Not supported in BC mode */
9867 if (CHIP_INT_MODE_IS_BC(sc
)) {
9871 data
= ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
<<
9872 IGU_REGULAR_CLEANUP_TYPE_SHIFT
) |
9873 IGU_REGULAR_CLEANUP_SET
| IGU_REGULAR_BCLEANUP
);
9875 ctl
= ((addr_encode
<< IGU_CTRL_REG_ADDRESS_SHIFT
) |
9876 (func_encode
<< IGU_CTRL_REG_FID_SHIFT
) |
9877 (IGU_CTRL_CMD_TYPE_WR
<< IGU_CTRL_REG_TYPE_SHIFT
));
9879 REG_WR(sc
, igu_addr_data
, data
);
9883 PMD_DRV_LOG(DEBUG
, sc
, "write 0x%08x to IGU(via GRC) addr 0x%x",
9885 REG_WR(sc
, igu_addr_ctl
, ctl
);
9889 /* wait for clean up to finish */
9890 while (!(REG_RD(sc
, igu_addr_ack
) & sb_bit
) && --cnt
) {
9894 if (!(REG_RD(sc
, igu_addr_ack
) & sb_bit
)) {
9895 PMD_DRV_LOG(DEBUG
, sc
,
9896 "Unable to finish IGU cleanup: "
9897 "idu_sb_id %d offset %d bit %d (cnt %d)",
9898 idu_sb_id
, idu_sb_id
/ 32, idu_sb_id
% 32, cnt
);
9902 static void bnx2x_igu_clear_sb(struct bnx2x_softc
*sc
, uint8_t idu_sb_id
)
9904 bnx2x_igu_clear_sb_gen(sc
, SC_FUNC(sc
), idu_sb_id
, TRUE
/*PF*/);
9907 /*******************/
9908 /* ECORE CALLBACKS */
9909 /*******************/
9911 static void bnx2x_reset_common(struct bnx2x_softc
*sc
)
9913 uint32_t val
= 0x1400;
9915 PMD_INIT_FUNC_TRACE(sc
);
9918 REG_WR(sc
, (GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
),
9921 if (CHIP_IS_E3(sc
)) {
9922 val
|= MISC_REGISTERS_RESET_REG_2_MSTAT0
;
9923 val
|= MISC_REGISTERS_RESET_REG_2_MSTAT1
;
9926 REG_WR(sc
, (GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
), val
);
9929 static void bnx2x_common_init_phy(struct bnx2x_softc
*sc
)
9931 uint32_t shmem_base
[2];
9932 uint32_t shmem2_base
[2];
9934 /* Avoid common init in case MFW supports LFA */
9935 if (SHMEM2_RD(sc
, size
) >
9936 (uint32_t) offsetof(struct shmem2_region
,
9937 lfa_host_addr
[SC_PORT(sc
)])) {
9941 shmem_base
[0] = sc
->devinfo
.shmem_base
;
9942 shmem2_base
[0] = sc
->devinfo
.shmem2_base
;
9944 if (!CHIP_IS_E1x(sc
)) {
9945 shmem_base
[1] = SHMEM2_RD(sc
, other_shmem_base_addr
);
9946 shmem2_base
[1] = SHMEM2_RD(sc
, other_shmem2_base_addr
);
9949 bnx2x_acquire_phy_lock(sc
);
9950 elink_common_init_phy(sc
, shmem_base
, shmem2_base
,
9951 sc
->devinfo
.chip_id
, 0);
9952 bnx2x_release_phy_lock(sc
);
9955 static void bnx2x_pf_disable(struct bnx2x_softc
*sc
)
9957 uint32_t val
= REG_RD(sc
, IGU_REG_PF_CONFIGURATION
);
9959 val
&= ~IGU_PF_CONF_FUNC_EN
;
9961 REG_WR(sc
, IGU_REG_PF_CONFIGURATION
, val
);
9962 REG_WR(sc
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 0);
9963 REG_WR(sc
, CFC_REG_WEAK_ENABLE_PF
, 0);
9966 static void bnx2x_init_pxp(struct bnx2x_softc
*sc
)
9969 int r_order
, w_order
;
9971 devctl
= bnx2x_pcie_capability_read(sc
, PCIR_EXPRESS_DEVICE_CTL
);
9973 w_order
= ((devctl
& PCIM_EXP_CTL_MAX_PAYLOAD
) >> 5);
9974 r_order
= ((devctl
& PCIM_EXP_CTL_MAX_READ_REQUEST
) >> 12);
9976 ecore_init_pxp_arb(sc
, r_order
, w_order
);
9979 static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc
*sc
)
9981 uint32_t base
= PXP2_REG_PGL_PRETEND_FUNC_F0
;
9982 uint32_t stride
= (PXP2_REG_PGL_PRETEND_FUNC_F1
- base
);
9983 return base
+ (SC_ABS_FUNC(sc
)) * stride
;
9987 * Called only on E1H or E2.
9988 * When pretending to be PF, the pretend value is the function number 0..7.
9989 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
9992 static int bnx2x_pretend_func(struct bnx2x_softc
*sc
, uint16_t pretend_func_val
)
9994 uint32_t pretend_reg
;
9996 if (CHIP_IS_E1H(sc
) && (pretend_func_val
> E1H_FUNC_MAX
))
9999 /* get my own pretend register */
10000 pretend_reg
= bnx2x_get_pretend_reg(sc
);
10001 REG_WR(sc
, pretend_reg
, pretend_func_val
);
10002 REG_RD(sc
, pretend_reg
);
10006 static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc
*sc
)
10013 val
= (SHMEM_RD(sc
, dev_info
.shared_hw_config
.config2
) &
10014 SHARED_HW_CFG_FAN_FAILURE_MASK
);
10016 if (val
== SHARED_HW_CFG_FAN_FAILURE_ENABLED
) {
10020 * The fan failure mechanism is usually related to the PHY type since
10021 * the power consumption of the board is affected by the PHY. Currently,
10022 * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481.
10024 else if (val
== SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE
) {
10025 for (port
= PORT_0
; port
< PORT_MAX
; port
++) {
10026 is_required
|= elink_fan_failure_det_req(sc
,
10028 devinfo
.shmem_base
,
10030 devinfo
.shmem2_base
,
10035 if (is_required
== 0) {
10039 /* Fan failure is indicated by SPIO 5 */
10040 bnx2x_set_spio(sc
, MISC_SPIO_SPIO5
, MISC_SPIO_INPUT_HI_Z
);
10042 /* set to active low mode */
10043 val
= REG_RD(sc
, MISC_REG_SPIO_INT
);
10044 val
|= (MISC_SPIO_SPIO5
<< MISC_SPIO_INT_OLD_SET_POS
);
10045 REG_WR(sc
, MISC_REG_SPIO_INT
, val
);
10047 /* enable interrupt to signal the IGU */
10048 val
= REG_RD(sc
, MISC_REG_SPIO_EVENT_EN
);
10049 val
|= MISC_SPIO_SPIO5
;
10050 REG_WR(sc
, MISC_REG_SPIO_EVENT_EN
, val
);
10053 static void bnx2x_enable_blocks_attention(struct bnx2x_softc
*sc
)
10057 REG_WR(sc
, PXP_REG_PXP_INT_MASK_0
, 0);
10058 if (!CHIP_IS_E1x(sc
)) {
10059 REG_WR(sc
, PXP_REG_PXP_INT_MASK_1
, 0x40);
10061 REG_WR(sc
, PXP_REG_PXP_INT_MASK_1
, 0);
10063 REG_WR(sc
, DORQ_REG_DORQ_INT_MASK
, 0);
10064 REG_WR(sc
, CFC_REG_CFC_INT_MASK
, 0);
10066 * mask read length error interrupts in brb for parser
10067 * (parsing unit and 'checksum and crc' unit)
10068 * these errors are legal (PU reads fixed length and CAC can cause
10069 * read length error on truncated packets)
10071 REG_WR(sc
, BRB1_REG_BRB1_INT_MASK
, 0xFC00);
10072 REG_WR(sc
, QM_REG_QM_INT_MASK
, 0);
10073 REG_WR(sc
, TM_REG_TM_INT_MASK
, 0);
10074 REG_WR(sc
, XSDM_REG_XSDM_INT_MASK_0
, 0);
10075 REG_WR(sc
, XSDM_REG_XSDM_INT_MASK_1
, 0);
10076 REG_WR(sc
, XCM_REG_XCM_INT_MASK
, 0);
10077 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
10078 /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
10079 REG_WR(sc
, USDM_REG_USDM_INT_MASK_0
, 0);
10080 REG_WR(sc
, USDM_REG_USDM_INT_MASK_1
, 0);
10081 REG_WR(sc
, UCM_REG_UCM_INT_MASK
, 0);
10082 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
10083 /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
10084 REG_WR(sc
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
10085 REG_WR(sc
, CSDM_REG_CSDM_INT_MASK_0
, 0);
10086 REG_WR(sc
, CSDM_REG_CSDM_INT_MASK_1
, 0);
10087 REG_WR(sc
, CCM_REG_CCM_INT_MASK
, 0);
10088 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
10089 /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
10091 val
= (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
|
10092 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
|
10093 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
);
10094 if (!CHIP_IS_E1x(sc
)) {
10095 val
|= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
|
10096 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED
);
10098 REG_WR(sc
, PXP2_REG_PXP2_INT_MASK_0
, val
);
10100 REG_WR(sc
, TSDM_REG_TSDM_INT_MASK_0
, 0);
10101 REG_WR(sc
, TSDM_REG_TSDM_INT_MASK_1
, 0);
10102 REG_WR(sc
, TCM_REG_TCM_INT_MASK
, 0);
10103 /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
10105 if (!CHIP_IS_E1x(sc
)) {
10106 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
10107 REG_WR(sc
, TSEM_REG_TSEM_INT_MASK_1
, 0x07ff);
10110 REG_WR(sc
, CDU_REG_CDU_INT_MASK
, 0);
10111 REG_WR(sc
, DMAE_REG_DMAE_INT_MASK
, 0);
10112 /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
10113 REG_WR(sc
, PBF_REG_PBF_INT_MASK
, 0x18); /* bit 3,4 masked */
10117 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
10119 * @sc: driver handle
10121 static int bnx2x_init_hw_common(struct bnx2x_softc
*sc
)
10123 uint8_t abs_func_id
;
10126 PMD_DRV_LOG(DEBUG
, sc
,
10127 "starting common init for func %d", SC_ABS_FUNC(sc
));
10130 * take the RESET lock to protect undi_unload flow from accessing
10131 * registers while we are resetting the chip
10133 bnx2x_acquire_hw_lock(sc
, HW_LOCK_RESOURCE_RESET
);
10135 bnx2x_reset_common(sc
);
10137 REG_WR(sc
, (GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
), 0xffffffff);
10140 if (CHIP_IS_E3(sc
)) {
10141 val
|= MISC_REGISTERS_RESET_REG_2_MSTAT0
;
10142 val
|= MISC_REGISTERS_RESET_REG_2_MSTAT1
;
10145 REG_WR(sc
, (GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
), val
);
10147 bnx2x_release_hw_lock(sc
, HW_LOCK_RESOURCE_RESET
);
10149 ecore_init_block(sc
, BLOCK_MISC
, PHASE_COMMON
);
10151 if (!CHIP_IS_E1x(sc
)) {
10153 * 4-port mode or 2-port mode we need to turn off master-enable for
10154 * everyone. After that we turn it back on for self. So, we disregard
10155 * multi-function, and always disable all functions on the given path,
10156 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
10158 for (abs_func_id
= SC_PATH(sc
);
10159 abs_func_id
< (E2_FUNC_MAX
* 2); abs_func_id
+= 2) {
10160 if (abs_func_id
== SC_ABS_FUNC(sc
)) {
10162 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
,
10167 bnx2x_pretend_func(sc
, abs_func_id
);
10169 /* clear pf enable */
10170 bnx2x_pf_disable(sc
);
10172 bnx2x_pretend_func(sc
, SC_ABS_FUNC(sc
));
10176 ecore_init_block(sc
, BLOCK_PXP
, PHASE_COMMON
);
10178 ecore_init_block(sc
, BLOCK_PXP2
, PHASE_COMMON
);
10179 bnx2x_init_pxp(sc
);
10181 #ifdef __BIG_ENDIAN
10182 REG_WR(sc
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
10183 REG_WR(sc
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
10184 REG_WR(sc
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
10185 REG_WR(sc
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
10186 REG_WR(sc
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
10187 /* make sure this value is 0 */
10188 REG_WR(sc
, PXP2_REG_RQ_HC_ENDIAN_M
, 0);
10190 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
10191 REG_WR(sc
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
10192 REG_WR(sc
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
10193 REG_WR(sc
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
10194 REG_WR(sc
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
10197 ecore_ilt_init_page_size(sc
, INITOP_SET
);
10199 if (CHIP_REV_IS_FPGA(sc
) && CHIP_IS_E1H(sc
)) {
10200 REG_WR(sc
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
10203 /* let the HW do it's magic... */
10206 /* finish PXP init */
10208 val
= REG_RD(sc
, PXP2_REG_RQ_CFG_DONE
);
10210 PMD_DRV_LOG(NOTICE
, sc
, "PXP2 CFG failed");
10213 val
= REG_RD(sc
, PXP2_REG_RD_INIT_DONE
);
10215 PMD_DRV_LOG(NOTICE
, sc
, "PXP2 RD_INIT failed");
10220 * Timer bug workaround for E2 only. We need to set the entire ILT to have
10221 * entries with value "0" and valid bit on. This needs to be done by the
10222 * first PF that is loaded in a path (i.e. common phase)
10224 if (!CHIP_IS_E1x(sc
)) {
10226 * In E2 there is a bug in the timers block that can cause function 6 / 7
10227 * (i.e. vnic3) to start even if it is marked as "scan-off".
10228 * This occurs when a different function (func2,3) is being marked
10229 * as "scan-off". Real-life scenario for example: if a driver is being
10230 * load-unloaded while func6,7 are down. This will cause the timer to access
10231 * the ilt, translate to a logical address and send a request to read/write.
10232 * Since the ilt for the function that is down is not valid, this will cause
10233 * a translation error which is unrecoverable.
10234 * The Workaround is intended to make sure that when this happens nothing
10235 * fatal will occur. The workaround:
10236 * 1. First PF driver which loads on a path will:
10237 * a. After taking the chip out of reset, by using pretend,
10238 * it will write "0" to the following registers of
10240 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10241 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
10242 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
10243 * And for itself it will write '1' to
10244 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
10245 * dmae-operations (writing to pram for example.)
10246 * note: can be done for only function 6,7 but cleaner this
10248 * b. Write zero+valid to the entire ILT.
10249 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
10250 * VNIC3 (of that port). The range allocated will be the
10251 * entire ILT. This is needed to prevent ILT range error.
10252 * 2. Any PF driver load flow:
10253 * a. ILT update with the physical addresses of the allocated
10255 * b. Wait 20msec. - note that this timeout is needed to make
10256 * sure there are no requests in one of the PXP internal
10257 * queues with "old" ILT addresses.
10258 * c. PF enable in the PGLC.
10259 * d. Clear the was_error of the PF in the PGLC. (could have
10260 * occurred while driver was down)
10261 * e. PF enable in the CFC (WEAK + STRONG)
10262 * f. Timers scan enable
10263 * 3. PF driver unload flow:
10264 * a. Clear the Timers scan_en.
10265 * b. Polling for scan_on=0 for that PF.
10266 * c. Clear the PF enable bit in the PXP.
10267 * d. Clear the PF enable in the CFC (WEAK + STRONG)
10268 * e. Write zero+valid to all ILT entries (The valid bit must
10270 * f. If this is VNIC 3 of a port then also init
10271 * first_timers_ilt_entry to zero and last_timers_ilt_entry
10272 * to the last enrty in the ILT.
10275 * Currently the PF error in the PGLC is non recoverable.
10276 * In the future the there will be a recovery routine for this error.
10277 * Currently attention is masked.
10278 * Having an MCP lock on the load/unload process does not guarantee that
10279 * there is no Timer disable during Func6/7 enable. This is because the
10280 * Timers scan is currently being cleared by the MCP on FLR.
10281 * Step 2.d can be done only for PF6/7 and the driver can also check if
10282 * there is error before clearing it. But the flow above is simpler and
10284 * All ILT entries are written by zero+valid and not just PF6/7
10285 * ILT entries since in the future the ILT entries allocation for
10286 * PF-s might be dynamic.
10288 struct ilt_client_info ilt_cli
;
10289 struct ecore_ilt ilt
;
10291 memset(&ilt_cli
, 0, sizeof(struct ilt_client_info
));
10292 memset(&ilt
, 0, sizeof(struct ecore_ilt
));
10294 /* initialize dummy TM client */
10296 ilt_cli
.end
= ILT_NUM_PAGE_ENTRIES
- 1;
10297 ilt_cli
.client_num
= ILT_CLIENT_TM
;
10300 * Step 1: set zeroes to all ilt page entries with valid bit on
10301 * Step 2: set the timers first/last ilt entry to point
10302 * to the entire range to prevent ILT range error for 3rd/4th
10303 * vnic (this code assumes existence of the vnic)
10305 * both steps performed by call to ecore_ilt_client_init_op()
10306 * with dummy TM client
10308 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
10309 * and his brother are split registers
10312 bnx2x_pretend_func(sc
, (SC_PATH(sc
) + 6));
10313 ecore_ilt_client_init_op_ilt(sc
, &ilt
, &ilt_cli
, INITOP_CLEAR
);
10314 bnx2x_pretend_func(sc
, SC_ABS_FUNC(sc
));
10316 REG_WR(sc
, PXP2_REG_RQ_DRAM_ALIGN
, BNX2X_PXP_DRAM_ALIGN
);
10317 REG_WR(sc
, PXP2_REG_RQ_DRAM_ALIGN_RD
, BNX2X_PXP_DRAM_ALIGN
);
10318 REG_WR(sc
, PXP2_REG_RQ_DRAM_ALIGN_SEL
, 1);
10321 REG_WR(sc
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
10322 REG_WR(sc
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
10324 if (!CHIP_IS_E1x(sc
)) {
10327 ecore_init_block(sc
, BLOCK_PGLUE_B
, PHASE_COMMON
);
10328 ecore_init_block(sc
, BLOCK_ATC
, PHASE_COMMON
);
10330 /* let the HW do it's magic... */
10333 val
= REG_RD(sc
, ATC_REG_ATC_INIT_DONE
);
10334 } while (factor
-- && (val
!= 1));
10337 PMD_DRV_LOG(NOTICE
, sc
, "ATC_INIT failed");
10342 ecore_init_block(sc
, BLOCK_DMAE
, PHASE_COMMON
);
10344 /* clean the DMAE memory */
10345 sc
->dmae_ready
= 1;
10346 ecore_init_fill(sc
, TSEM_REG_PRAM
, 0, 8);
10348 ecore_init_block(sc
, BLOCK_TCM
, PHASE_COMMON
);
10350 ecore_init_block(sc
, BLOCK_UCM
, PHASE_COMMON
);
10352 ecore_init_block(sc
, BLOCK_CCM
, PHASE_COMMON
);
10354 ecore_init_block(sc
, BLOCK_XCM
, PHASE_COMMON
);
10356 bnx2x_read_dmae(sc
, XSEM_REG_PASSIVE_BUFFER
, 3);
10357 bnx2x_read_dmae(sc
, CSEM_REG_PASSIVE_BUFFER
, 3);
10358 bnx2x_read_dmae(sc
, TSEM_REG_PASSIVE_BUFFER
, 3);
10359 bnx2x_read_dmae(sc
, USEM_REG_PASSIVE_BUFFER
, 3);
10361 ecore_init_block(sc
, BLOCK_QM
, PHASE_COMMON
);
10363 /* QM queues pointers table */
10364 ecore_qm_init_ptr_table(sc
, sc
->qm_cid_count
, INITOP_SET
);
10366 /* soft reset pulse */
10367 REG_WR(sc
, QM_REG_SOFT_RESET
, 1);
10368 REG_WR(sc
, QM_REG_SOFT_RESET
, 0);
10370 if (CNIC_SUPPORT(sc
))
10371 ecore_init_block(sc
, BLOCK_TM
, PHASE_COMMON
);
10373 ecore_init_block(sc
, BLOCK_DORQ
, PHASE_COMMON
);
10374 REG_WR(sc
, DORQ_REG_DPM_CID_OFST
, BNX2X_DB_SHIFT
);
10376 if (!CHIP_REV_IS_SLOW(sc
)) {
10377 /* enable hw interrupt from doorbell Q */
10378 REG_WR(sc
, DORQ_REG_DORQ_INT_MASK
, 0);
10381 ecore_init_block(sc
, BLOCK_BRB1
, PHASE_COMMON
);
10383 ecore_init_block(sc
, BLOCK_PRS
, PHASE_COMMON
);
10384 REG_WR(sc
, PRS_REG_A_PRSU_20
, 0xf);
10385 REG_WR(sc
, PRS_REG_E1HOV_MODE
, sc
->devinfo
.mf_info
.path_has_ovlan
);
10387 if (!CHIP_IS_E1x(sc
) && !CHIP_IS_E3B0(sc
)) {
10388 if (IS_MF_AFEX(sc
)) {
10390 * configure that AFEX and VLAN headers must be
10391 * received in AFEX mode
10393 REG_WR(sc
, PRS_REG_HDRS_AFTER_BASIC
, 0xE);
10394 REG_WR(sc
, PRS_REG_MUST_HAVE_HDRS
, 0xA);
10395 REG_WR(sc
, PRS_REG_HDRS_AFTER_TAG_0
, 0x6);
10396 REG_WR(sc
, PRS_REG_TAG_ETHERTYPE_0
, 0x8926);
10397 REG_WR(sc
, PRS_REG_TAG_LEN_0
, 0x4);
10400 * Bit-map indicating which L2 hdrs may appear
10401 * after the basic Ethernet header
10403 REG_WR(sc
, PRS_REG_HDRS_AFTER_BASIC
,
10404 sc
->devinfo
.mf_info
.path_has_ovlan
? 7 : 6);
10408 ecore_init_block(sc
, BLOCK_TSDM
, PHASE_COMMON
);
10409 ecore_init_block(sc
, BLOCK_CSDM
, PHASE_COMMON
);
10410 ecore_init_block(sc
, BLOCK_USDM
, PHASE_COMMON
);
10411 ecore_init_block(sc
, BLOCK_XSDM
, PHASE_COMMON
);
10413 if (!CHIP_IS_E1x(sc
)) {
10414 /* reset VFC memories */
10415 REG_WR(sc
, TSEM_REG_FAST_MEMORY
+ VFC_REG_MEMORIES_RST
,
10416 VFC_MEMORIES_RST_REG_CAM_RST
|
10417 VFC_MEMORIES_RST_REG_RAM_RST
);
10418 REG_WR(sc
, XSEM_REG_FAST_MEMORY
+ VFC_REG_MEMORIES_RST
,
10419 VFC_MEMORIES_RST_REG_CAM_RST
|
10420 VFC_MEMORIES_RST_REG_RAM_RST
);
10425 ecore_init_block(sc
, BLOCK_TSEM
, PHASE_COMMON
);
10426 ecore_init_block(sc
, BLOCK_USEM
, PHASE_COMMON
);
10427 ecore_init_block(sc
, BLOCK_CSEM
, PHASE_COMMON
);
10428 ecore_init_block(sc
, BLOCK_XSEM
, PHASE_COMMON
);
10430 /* sync semi rtc */
10431 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x80000000);
10432 REG_WR(sc
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x80000000);
10434 ecore_init_block(sc
, BLOCK_UPB
, PHASE_COMMON
);
10435 ecore_init_block(sc
, BLOCK_XPB
, PHASE_COMMON
);
10436 ecore_init_block(sc
, BLOCK_PBF
, PHASE_COMMON
);
10438 if (!CHIP_IS_E1x(sc
)) {
10439 if (IS_MF_AFEX(sc
)) {
10441 * configure that AFEX and VLAN headers must be
10442 * sent in AFEX mode
10444 REG_WR(sc
, PBF_REG_HDRS_AFTER_BASIC
, 0xE);
10445 REG_WR(sc
, PBF_REG_MUST_HAVE_HDRS
, 0xA);
10446 REG_WR(sc
, PBF_REG_HDRS_AFTER_TAG_0
, 0x6);
10447 REG_WR(sc
, PBF_REG_TAG_ETHERTYPE_0
, 0x8926);
10448 REG_WR(sc
, PBF_REG_TAG_LEN_0
, 0x4);
10450 REG_WR(sc
, PBF_REG_HDRS_AFTER_BASIC
,
10451 sc
->devinfo
.mf_info
.path_has_ovlan
? 7 : 6);
10455 REG_WR(sc
, SRC_REG_SOFT_RST
, 1);
10457 ecore_init_block(sc
, BLOCK_SRC
, PHASE_COMMON
);
10459 if (CNIC_SUPPORT(sc
)) {
10460 REG_WR(sc
, SRC_REG_KEYSEARCH_0
, 0x63285672);
10461 REG_WR(sc
, SRC_REG_KEYSEARCH_1
, 0x24b8f2cc);
10462 REG_WR(sc
, SRC_REG_KEYSEARCH_2
, 0x223aef9b);
10463 REG_WR(sc
, SRC_REG_KEYSEARCH_3
, 0x26001e3a);
10464 REG_WR(sc
, SRC_REG_KEYSEARCH_4
, 0x7ae91116);
10465 REG_WR(sc
, SRC_REG_KEYSEARCH_5
, 0x5ce5230b);
10466 REG_WR(sc
, SRC_REG_KEYSEARCH_6
, 0x298d8adf);
10467 REG_WR(sc
, SRC_REG_KEYSEARCH_7
, 0x6eb0ff09);
10468 REG_WR(sc
, SRC_REG_KEYSEARCH_8
, 0x1830f82f);
10469 REG_WR(sc
, SRC_REG_KEYSEARCH_9
, 0x01e46be7);
10471 REG_WR(sc
, SRC_REG_SOFT_RST
, 0);
10473 if (sizeof(union cdu_context
) != 1024) {
10474 /* we currently assume that a context is 1024 bytes */
10475 PMD_DRV_LOG(NOTICE
, sc
,
10476 "please adjust the size of cdu_context(%ld)",
10477 (long)sizeof(union cdu_context
));
10480 ecore_init_block(sc
, BLOCK_CDU
, PHASE_COMMON
);
10481 val
= (4 << 24) + (0 << 12) + 1024;
10482 REG_WR(sc
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
10484 ecore_init_block(sc
, BLOCK_CFC
, PHASE_COMMON
);
10486 REG_WR(sc
, CFC_REG_INIT_REG
, 0x7FF);
10487 /* enable context validation interrupt from CFC */
10488 REG_WR(sc
, CFC_REG_CFC_INT_MASK
, 0);
10490 /* set the thresholds to prevent CFC/CDU race */
10491 REG_WR(sc
, CFC_REG_DEBUG0
, 0x20020000);
10492 ecore_init_block(sc
, BLOCK_HC
, PHASE_COMMON
);
10494 if (!CHIP_IS_E1x(sc
) && BNX2X_NOMCP(sc
)) {
10495 REG_WR(sc
, IGU_REG_RESET_MEMORIES
, 0x36);
10498 ecore_init_block(sc
, BLOCK_IGU
, PHASE_COMMON
);
10499 ecore_init_block(sc
, BLOCK_MISC_AEU
, PHASE_COMMON
);
10501 /* Reset PCIE errors for debug */
10502 REG_WR(sc
, 0x2814, 0xffffffff);
10503 REG_WR(sc
, 0x3820, 0xffffffff);
10505 if (!CHIP_IS_E1x(sc
)) {
10506 REG_WR(sc
, PCICFG_OFFSET
+ PXPCS_TL_CONTROL_5
,
10507 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1
|
10508 PXPCS_TL_CONTROL_5_ERR_UNSPPORT
));
10509 REG_WR(sc
, PCICFG_OFFSET
+ PXPCS_TL_FUNC345_STAT
,
10510 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4
|
10511 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3
|
10512 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2
));
10513 REG_WR(sc
, PCICFG_OFFSET
+ PXPCS_TL_FUNC678_STAT
,
10514 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7
|
10515 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6
|
10516 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5
));
10519 ecore_init_block(sc
, BLOCK_NIG
, PHASE_COMMON
);
10521 /* in E3 this done in per-port section */
10522 if (!CHIP_IS_E3(sc
))
10523 REG_WR(sc
, NIG_REG_LLH_MF_MODE
, IS_MF(sc
));
10525 if (CHIP_IS_E1H(sc
)) {
10526 /* not applicable for E2 (and above ...) */
10527 REG_WR(sc
, NIG_REG_LLH_E1HOV_MODE
, IS_MF_SD(sc
));
10530 if (CHIP_REV_IS_SLOW(sc
)) {
10534 /* finish CFC init */
10535 val
= reg_poll(sc
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
10537 PMD_DRV_LOG(NOTICE
, sc
, "CFC LL_INIT failed");
10540 val
= reg_poll(sc
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
10542 PMD_DRV_LOG(NOTICE
, sc
, "CFC AC_INIT failed");
10545 val
= reg_poll(sc
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
10547 PMD_DRV_LOG(NOTICE
, sc
, "CFC CAM_INIT failed");
10550 REG_WR(sc
, CFC_REG_DEBUG0
, 0);
10552 bnx2x_setup_fan_failure_detection(sc
);
10554 /* clear PXP2 attentions */
10555 REG_RD(sc
, PXP2_REG_PXP2_INT_STS_CLR_0
);
10557 bnx2x_enable_blocks_attention(sc
);
10559 if (!CHIP_REV_IS_SLOW(sc
)) {
10560 ecore_enable_blocks_parity(sc
);
10563 if (!BNX2X_NOMCP(sc
)) {
10564 if (CHIP_IS_E1x(sc
)) {
10565 bnx2x_common_init_phy(sc
);
10573 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
10575 * @sc: driver handle
10577 static int bnx2x_init_hw_common_chip(struct bnx2x_softc
*sc
)
10579 int rc
= bnx2x_init_hw_common(sc
);
10585 /* In E2 2-PORT mode, same ext phy is used for the two paths */
10586 if (!BNX2X_NOMCP(sc
)) {
10587 bnx2x_common_init_phy(sc
);
10593 static int bnx2x_init_hw_port(struct bnx2x_softc
*sc
)
10595 int port
= SC_PORT(sc
);
10596 int init_phase
= port
? PHASE_PORT1
: PHASE_PORT0
;
10597 uint32_t low
, high
;
10600 PMD_DRV_LOG(DEBUG
, sc
, "starting port init for port %d", port
);
10602 REG_WR(sc
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
* 4, 0);
10604 ecore_init_block(sc
, BLOCK_MISC
, init_phase
);
10605 ecore_init_block(sc
, BLOCK_PXP
, init_phase
);
10606 ecore_init_block(sc
, BLOCK_PXP2
, init_phase
);
10609 * Timers bug workaround: disables the pf_master bit in pglue at
10610 * common phase, we need to enable it here before any dmae access are
10611 * attempted. Therefore we manually added the enable-master to the
10612 * port phase (it also happens in the function phase)
10614 if (!CHIP_IS_E1x(sc
)) {
10615 REG_WR(sc
, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, 1);
10618 ecore_init_block(sc
, BLOCK_ATC
, init_phase
);
10619 ecore_init_block(sc
, BLOCK_DMAE
, init_phase
);
10620 ecore_init_block(sc
, BLOCK_PGLUE_B
, init_phase
);
10621 ecore_init_block(sc
, BLOCK_QM
, init_phase
);
10623 ecore_init_block(sc
, BLOCK_TCM
, init_phase
);
10624 ecore_init_block(sc
, BLOCK_UCM
, init_phase
);
10625 ecore_init_block(sc
, BLOCK_CCM
, init_phase
);
10626 ecore_init_block(sc
, BLOCK_XCM
, init_phase
);
10628 /* QM cid (connection) count */
10629 ecore_qm_init_cid_count(sc
, sc
->qm_cid_count
, INITOP_SET
);
10631 if (CNIC_SUPPORT(sc
)) {
10632 ecore_init_block(sc
, BLOCK_TM
, init_phase
);
10633 REG_WR(sc
, TM_REG_LIN0_SCAN_TIME
+ port
* 4, 20);
10634 REG_WR(sc
, TM_REG_LIN0_MAX_ACTIVE_CID
+ port
* 4, 31);
10637 ecore_init_block(sc
, BLOCK_DORQ
, init_phase
);
10639 ecore_init_block(sc
, BLOCK_BRB1
, init_phase
);
10641 if (CHIP_IS_E1H(sc
)) {
10643 low
= (BNX2X_ONE_PORT(sc
) ? 160 : 246);
10644 } else if (sc
->mtu
> 4096) {
10645 if (BNX2X_ONE_PORT(sc
)) {
10649 /* (24*1024 + val*4)/256 */
10650 low
= (96 + (val
/ 64) + ((val
% 64) ? 1 : 0));
10653 low
= (BNX2X_ONE_PORT(sc
) ? 80 : 160);
10655 high
= (low
+ 56); /* 14*1024/256 */
10656 REG_WR(sc
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
+ port
* 4, low
);
10657 REG_WR(sc
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
+ port
* 4, high
);
10660 if (CHIP_IS_MODE_4_PORT(sc
)) {
10661 REG_WR(sc
, SC_PORT(sc
) ?
10662 BRB1_REG_MAC_GUARANTIED_1
:
10663 BRB1_REG_MAC_GUARANTIED_0
, 40);
10666 ecore_init_block(sc
, BLOCK_PRS
, init_phase
);
10667 if (CHIP_IS_E3B0(sc
)) {
10668 if (IS_MF_AFEX(sc
)) {
10669 /* configure headers for AFEX mode */
10671 REG_WR(sc
, PRS_REG_HDRS_AFTER_BASIC_PORT_1
,
10673 REG_WR(sc
, PRS_REG_HDRS_AFTER_TAG_0_PORT_1
,
10675 REG_WR(sc
, PRS_REG_MUST_HAVE_HDRS_PORT_1
, 0xA);
10677 REG_WR(sc
, PRS_REG_HDRS_AFTER_BASIC_PORT_0
,
10679 REG_WR(sc
, PRS_REG_HDRS_AFTER_TAG_0_PORT_0
,
10681 REG_WR(sc
, PRS_REG_MUST_HAVE_HDRS_PORT_0
, 0xA);
10684 /* Ovlan exists only if we are in multi-function +
10685 * switch-dependent mode, in switch-independent there
10686 * is no ovlan headers
10688 REG_WR(sc
, SC_PORT(sc
) ?
10689 PRS_REG_HDRS_AFTER_BASIC_PORT_1
:
10690 PRS_REG_HDRS_AFTER_BASIC_PORT_0
,
10691 (sc
->devinfo
.mf_info
.path_has_ovlan
? 7 : 6));
10695 ecore_init_block(sc
, BLOCK_TSDM
, init_phase
);
10696 ecore_init_block(sc
, BLOCK_CSDM
, init_phase
);
10697 ecore_init_block(sc
, BLOCK_USDM
, init_phase
);
10698 ecore_init_block(sc
, BLOCK_XSDM
, init_phase
);
10700 ecore_init_block(sc
, BLOCK_TSEM
, init_phase
);
10701 ecore_init_block(sc
, BLOCK_USEM
, init_phase
);
10702 ecore_init_block(sc
, BLOCK_CSEM
, init_phase
);
10703 ecore_init_block(sc
, BLOCK_XSEM
, init_phase
);
10705 ecore_init_block(sc
, BLOCK_UPB
, init_phase
);
10706 ecore_init_block(sc
, BLOCK_XPB
, init_phase
);
10708 ecore_init_block(sc
, BLOCK_PBF
, init_phase
);
10710 if (CHIP_IS_E1x(sc
)) {
10711 /* configure PBF to work without PAUSE mtu 9000 */
10712 REG_WR(sc
, PBF_REG_P0_PAUSE_ENABLE
+ port
* 4, 0);
10714 /* update threshold */
10715 REG_WR(sc
, PBF_REG_P0_ARB_THRSH
+ port
* 4, (9040 / 16));
10716 /* update init credit */
10717 REG_WR(sc
, PBF_REG_P0_INIT_CRD
+ port
* 4,
10718 (9040 / 16) + 553 - 22);
10720 /* probe changes */
10721 REG_WR(sc
, PBF_REG_INIT_P0
+ port
* 4, 1);
10723 REG_WR(sc
, PBF_REG_INIT_P0
+ port
* 4, 0);
10726 if (CNIC_SUPPORT(sc
)) {
10727 ecore_init_block(sc
, BLOCK_SRC
, init_phase
);
10730 ecore_init_block(sc
, BLOCK_CDU
, init_phase
);
10731 ecore_init_block(sc
, BLOCK_CFC
, init_phase
);
10732 ecore_init_block(sc
, BLOCK_HC
, init_phase
);
10733 ecore_init_block(sc
, BLOCK_IGU
, init_phase
);
10734 ecore_init_block(sc
, BLOCK_MISC_AEU
, init_phase
);
10735 /* init aeu_mask_attn_func_0/1:
10736 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
10737 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
10738 * bits 4-7 are used for "per vn group attention" */
10739 val
= IS_MF(sc
) ? 0xF7 : 0x7;
10741 REG_WR(sc
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
* 4, val
);
10743 ecore_init_block(sc
, BLOCK_NIG
, init_phase
);
10745 if (!CHIP_IS_E1x(sc
)) {
10746 /* Bit-map indicating which L2 hdrs may appear after the
10747 * basic Ethernet header
10749 if (IS_MF_AFEX(sc
)) {
10750 REG_WR(sc
, SC_PORT(sc
) ?
10751 NIG_REG_P1_HDRS_AFTER_BASIC
:
10752 NIG_REG_P0_HDRS_AFTER_BASIC
, 0xE);
10754 REG_WR(sc
, SC_PORT(sc
) ?
10755 NIG_REG_P1_HDRS_AFTER_BASIC
:
10756 NIG_REG_P0_HDRS_AFTER_BASIC
,
10757 IS_MF_SD(sc
) ? 7 : 6);
10760 if (CHIP_IS_E3(sc
)) {
10761 REG_WR(sc
, SC_PORT(sc
) ?
10762 NIG_REG_LLH1_MF_MODE
:
10763 NIG_REG_LLH_MF_MODE
, IS_MF(sc
));
10766 if (!CHIP_IS_E3(sc
)) {
10767 REG_WR(sc
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
* 4, 1);
10770 /* 0x2 disable mf_ov, 0x1 enable */
10771 REG_WR(sc
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
* 4,
10772 (IS_MF_SD(sc
) ? 0x1 : 0x2));
10774 if (!CHIP_IS_E1x(sc
)) {
10776 switch (sc
->devinfo
.mf_info
.mf_mode
) {
10777 case MULTI_FUNCTION_SD
:
10780 case MULTI_FUNCTION_SI
:
10781 case MULTI_FUNCTION_AFEX
:
10786 REG_WR(sc
, (SC_PORT(sc
) ? NIG_REG_LLH1_CLS_TYPE
:
10787 NIG_REG_LLH0_CLS_TYPE
), val
);
10789 REG_WR(sc
, NIG_REG_LLFC_ENABLE_0
+ port
* 4, 0);
10790 REG_WR(sc
, NIG_REG_LLFC_OUT_EN_0
+ port
* 4, 0);
10791 REG_WR(sc
, NIG_REG_PAUSE_ENABLE_0
+ port
* 4, 1);
10793 /* If SPIO5 is set to generate interrupts, enable it for this port */
10794 val
= REG_RD(sc
, MISC_REG_SPIO_EVENT_EN
);
10795 if (val
& MISC_SPIO_SPIO5
) {
10796 uint32_t reg_addr
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
10797 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
10798 val
= REG_RD(sc
, reg_addr
);
10799 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
10800 REG_WR(sc
, reg_addr
, val
);
10807 bnx2x_flr_clnup_reg_poll(struct bnx2x_softc
*sc
, uint32_t reg
,
10808 uint32_t expected
, uint32_t poll_count
)
10810 uint32_t cur_cnt
= poll_count
;
10813 while ((val
= REG_RD(sc
, reg
)) != expected
&& cur_cnt
--) {
10814 DELAY(FLR_WAIT_INTERVAL
);
10821 bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc
*sc
, uint32_t reg
,
10822 __rte_unused
const char *msg
, uint32_t poll_cnt
)
10824 uint32_t val
= bnx2x_flr_clnup_reg_poll(sc
, reg
, 0, poll_cnt
);
10827 PMD_DRV_LOG(NOTICE
, sc
, "%s usage count=%d", msg
, val
);
10834 /* Common routines with VF FLR cleanup */
10835 static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc
*sc
)
10837 /* adjust polling timeout */
10838 if (CHIP_REV_IS_EMUL(sc
)) {
10839 return FLR_POLL_CNT
* 2000;
10842 if (CHIP_REV_IS_FPGA(sc
)) {
10843 return FLR_POLL_CNT
* 120;
10846 return FLR_POLL_CNT
;
10849 static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc
*sc
, uint32_t poll_cnt
)
10851 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
10852 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10853 CFC_REG_NUM_LCIDS_INSIDE_PF
,
10854 "CFC PF usage counter timed out",
10859 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
10860 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10861 DORQ_REG_PF_USAGE_CNT
,
10862 "DQ PF usage counter timed out",
10867 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
10868 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10869 QM_REG_PF_USG_CNT_0
+ 4 * SC_FUNC(sc
),
10870 "QM PF usage counter timed out",
10875 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
10876 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10877 TM_REG_LIN0_VNIC_UC
+ 4 * SC_PORT(sc
),
10878 "Timers VNIC usage counter timed out",
10883 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10884 TM_REG_LIN0_NUM_SCANS
+
10886 "Timers NUM_SCANS usage counter timed out",
10891 /* Wait DMAE PF usage counter to zero */
10892 if (bnx2x_flr_clnup_poll_hw_counter(sc
,
10893 dmae_reg_go_c
[INIT_DMAE_C(sc
)],
10894 "DMAE dommand register timed out",
10902 #define OP_GEN_PARAM(param) \
10903 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
10904 #define OP_GEN_TYPE(type) \
10905 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
10906 #define OP_GEN_AGG_VECT(index) \
10907 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
10910 bnx2x_send_final_clnup(struct bnx2x_softc
*sc
, uint8_t clnup_func
,
10913 uint32_t op_gen_command
= 0;
10914 uint32_t comp_addr
= (BAR_CSTRORM_INTMEM
+
10915 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func
));
10918 if (REG_RD(sc
, comp_addr
)) {
10919 PMD_DRV_LOG(NOTICE
, sc
,
10920 "Cleanup complete was not 0 before sending");
10924 op_gen_command
|= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX
);
10925 op_gen_command
|= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE
);
10926 op_gen_command
|= OP_GEN_AGG_VECT(clnup_func
);
10927 op_gen_command
|= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT
;
10929 REG_WR(sc
, XSDM_REG_OPERATION_GEN
, op_gen_command
);
10931 if (bnx2x_flr_clnup_reg_poll(sc
, comp_addr
, 1, poll_cnt
) != 1) {
10932 PMD_DRV_LOG(NOTICE
, sc
, "FW final cleanup did not succeed");
10933 PMD_DRV_LOG(DEBUG
, sc
, "At timeout completion address contained %x",
10934 (REG_RD(sc
, comp_addr
)));
10935 rte_panic("FLR cleanup failed");
10939 /* Zero completion for nxt FLR */
10940 REG_WR(sc
, comp_addr
, 0);
static void
bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs,
			 uint32_t poll_count)
{
	uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
	uint32_t cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
	crd = crd_start = REG_RD(sc, regs->crd);
	init_crd = REG_RD(sc, regs->init_crd);

	while ((crd != init_crd) &&
	       ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) <
		(init_crd - crd_start))) {
		if (cur_cnt--) {
			DELAY(FLR_WAIT_INTERVAL);
			crd = REG_RD(sc, regs->crd);
			crd_freed = REG_RD(sc, regs->crd_freed);
		} else {
			break;
		}
	}
}
static void
bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs,
			 uint32_t poll_count)
{
	uint32_t occup, to_free, freed, freed_start;
	uint32_t cur_cnt = poll_count;

	occup = to_free = REG_RD(sc, regs->lines_occup);
	freed = freed_start = REG_RD(sc, regs->lines_freed);

	while (occup &&
	       ((uint32_t) ((int32_t) freed - (int32_t) freed_start) <
		to_free)) {
		if (cur_cnt--) {
			DELAY(FLR_WAIT_INTERVAL);
			occup = REG_RD(sc, regs->lines_occup);
			freed = REG_RD(sc, regs->lines_freed);
		} else {
			break;
		}
	}
}
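/*
 * Illustrative sketch (not part of the original driver): both flush helpers
 * above measure "amount freed since the loop started" as
 * (uint32_t)((int32_t)now - (int32_t)start), which stays correct even if
 * the hardware counter wraps around 2^32 in the meantime.
 */
static __rte_unused uint32_t
example_counter_delta(uint32_t start, uint32_t now)
{
	/* e.g. start = 0xfffffff0, now = 0x10 -> delta = 0x20 despite wrap */
	return (uint32_t)((int32_t)now - (int32_t)start);
}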
static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
		 PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD,
		 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
		 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD,
		 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
		 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD,
		 (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT,
		 (CHIP_IS_E3B0(sc)) ?
		 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
		 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	uint32_t i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
		bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
	}

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
		bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
	}
}
static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
{
	__rte_unused uint32_t val;

	val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
	PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);

	val = REG_RD(sc, PBF_REG_DISABLE_PF);
	PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);

	val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	PMD_DRV_LOG(DEBUG, sc,
		    "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	PMD_DRV_LOG(DEBUG, sc,
		    "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);

	val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
		    val);
}
/*
 * bnx2x_pf_flr_clnup
 * a. re-enable target read on the PF
 * b. poll cfc per function usage counter
 * c. poll the qm per function usage counter
 * d. poll the tm per function usage counter
 * e. poll the tm per function scan-done indication
 * f. clear the dmae channel associated with the PF
 * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions)
 * h. call the common flr cleanup code with -1 (pf indication)
 */
static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc)
{
	uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc);

	/* Re-enable PF target read access */
	REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) {
		return -1;
	}

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) {
		return -1;
	}

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(sc, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	DELAY(100000);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(sc)) {
		PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending");
	}

	/* Debug */
	bnx2x_hw_enable_status(sc);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}
static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
{
	int port = SC_PORT(sc);
	int func = SC_FUNC(sc);
	int init_phase = PHASE_PF0 + func;
	struct ecore_ilt *ilt = sc->ilt;
	uint16_t cdu_ilt_start;
	uint32_t addr, val;
	uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
	int main_mem_width, rc;
	uint32_t i;

	PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func);

	/* FLR cleanup */
	if (!CHIP_IS_E1x(sc)) {
		rc = bnx2x_pf_flr_clnup(sc);
		if (rc) {
			PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!");
			return rc;
		}
	}

	/* set MSI reconfigure capability */
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(sc, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(sc, addr, val);
	}

	ecore_init_block(sc, BLOCK_PXP, init_phase);
	ecore_init_block(sc, BLOCK_PXP2, init_phase);

	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(sc); i++) {
		ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
		    (rte_iova_t)sc->context[i].vcxt_dma.paddr;
		ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
	}
	ecore_ilt_init_op(sc, INITOP_SET);

	REG_WR(sc, PRS_REG_NIC_MODE, 1);

	if (!CHIP_IS_E1x(sc)) {
		uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if ((sc->interrupt_mode != INTR_MODE_MSIX)
		    || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) {
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		}

		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		DELAY(20000);

		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	sc->dmae_ready = 1;

	ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

	if (!CHIP_IS_E1x(sc))
		REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	ecore_init_block(sc, BLOCK_ATC, init_phase);
	ecore_init_block(sc, BLOCK_DMAE, init_phase);
	ecore_init_block(sc, BLOCK_NIG, init_phase);
	ecore_init_block(sc, BLOCK_SRC, init_phase);
	ecore_init_block(sc, BLOCK_MISC, init_phase);
	ecore_init_block(sc, BLOCK_TCM, init_phase);
	ecore_init_block(sc, BLOCK_UCM, init_phase);
	ecore_init_block(sc, BLOCK_CCM, init_phase);
	ecore_init_block(sc, BLOCK_XCM, init_phase);
	ecore_init_block(sc, BLOCK_TSEM, init_phase);
	ecore_init_block(sc, BLOCK_USEM, init_phase);
	ecore_init_block(sc, BLOCK_CSEM, init_phase);
	ecore_init_block(sc, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(sc))
		REG_WR(sc, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(sc)) {
		REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}

	ecore_init_block(sc, BLOCK_QM, init_phase);

	ecore_init_block(sc, BLOCK_TM, init_phase);
	ecore_init_block(sc, BLOCK_DORQ, init_phase);

	ecore_init_block(sc, BLOCK_BRB1, init_phase);
	ecore_init_block(sc, BLOCK_PRS, init_phase);
	ecore_init_block(sc, BLOCK_TSDM, init_phase);
	ecore_init_block(sc, BLOCK_CSDM, init_phase);
	ecore_init_block(sc, BLOCK_USDM, init_phase);
	ecore_init_block(sc, BLOCK_XSDM, init_phase);
	ecore_init_block(sc, BLOCK_UPB, init_phase);
	ecore_init_block(sc, BLOCK_XPB, init_phase);
	ecore_init_block(sc, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(sc))
		REG_WR(sc, PBF_REG_DISABLE_PF, 0);

	ecore_init_block(sc, BLOCK_CDU, init_phase);

	ecore_init_block(sc, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(sc))
		REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

	REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
	REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc));

	ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(sc)) {
			REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);

			REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
			REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
		}
		ecore_init_block(sc, BLOCK_HC, init_phase);
	} else {
		uint32_t num_segs, sb_idx, prod_offset;
		int dsb_idx = 0;

		REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0);

		if (!CHIP_IS_E1x(sc)) {
			REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		ecore_init_block(sc, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(sc)) {
			/*
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod; 141 - PF1 attn prod;
			 * 142 - PF2 attn prod; 143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(sc) ?
			    IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
				prod_offset = (sc->igu_base_sb + sb_idx) *
				    num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
					    (prod_offset + i) * 4;
					REG_WR(sc, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(sc) ?
			    IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_IS_MODE_4_PORT(sc))
				dsb_idx = SC_FUNC(sc);
			else
				dsb_idx = SC_VN(sc);

			prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matters what is the current chip mode
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
				    (prod_offset + i) * 4;
				REG_WR(sc, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(sc)) {
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(sc, sc->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(sc, sc->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(sc, 0x2114, 0xffffffff);
	REG_WR(sc, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(sc)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;	/*dwords */
		main_mem_base = HC_REG_MAIN_MEMORY +
		    SC_PORT(sc) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(sc, main_mem_prty_clr);
		if (val) {
			PMD_DRV_LOG(DEBUG, sc,
				    "Parity errors in HC block during function init (0x%x)!",
				    val);
		}

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(sc, i, main_mem_width / 4);
			bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(sc, main_mem_prty_clr);
	}

	/* Enable STORMs SP logging */
	REG_WR8(sc, BAR_USTRORM_INTMEM +
		USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
	REG_WR8(sc, BAR_TSTRORM_INTMEM +
		TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
	REG_WR8(sc, BAR_CSTRORM_INTMEM +
		CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
	REG_WR8(sc, BAR_XSTRORM_INTMEM +
		XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);

	elink_phy_probe(&sc->link_params);

	return 0;
}
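/*
 * Illustrative sketch (not part of the original driver): the IGU
 * producer-memory writes in bnx2x_init_hw_func() above address one 32-bit
 * producer per line, i.e. base + (prod_offset + segment) * 4.  The base
 * value below is a placeholder, not the real IGU_REG_PROD_CONS_MEMORY
 * offset.
 */
#define EXAMPLE_IGU_PROD_MEM_BASE	0x61000	/* placeholder offset */

static __rte_unused uint32_t
example_igu_prod_addr(uint32_t sb_id, uint32_t num_segs, uint32_t seg)
{
	uint32_t prod_offset = sb_id * num_segs;

	return EXAMPLE_IGU_PROD_MEM_BASE + (prod_offset + seg) * 4;
}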
static void bnx2x_link_reset(struct bnx2x_softc *sc)
{
	if (!BNX2X_NOMCP(sc)) {
		bnx2x_acquire_phy_lock(sc);
		elink_lfa_reset(&sc->link_params, &sc->link_vars);
		bnx2x_release_phy_lock(sc);
	} else {
		if (!CHIP_REV_IS_SLOW(sc)) {
			PMD_DRV_LOG(WARNING, sc,
				    "Bootcode is missing - cannot reset link");
		}
	}
}
static void bnx2x_reset_port(struct bnx2x_softc *sc)
{
	int port = SC_PORT(sc);
	uint32_t val;

	/* reset physical Link */
	bnx2x_link_reset(sc);

	REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
		    NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
	if (val) {
		PMD_DRV_LOG(DEBUG, sc,
			    "BRB1 is not empty, %d blocks are occupied", val);
	}
}
static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
{
	int reg;
	uint32_t wb_write[2];

	reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8;

	wb_write[0] = ONCHIP_ADDR1(addr);
	wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(sc, reg, wb_write, 2);
}
static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func)
{
	uint32_t i, base = FUNC_ILT_BASE(func);

	for (i = base; i < base + ILT_PER_FUNC; i++) {
		bnx2x_ilt_wr(sc, i, 0);
	}
}
static void bnx2x_reset_func(struct bnx2x_softc *sc)
{
	struct bnx2x_fastpath *fp;
	int port = SC_PORT(sc);
	int func = SC_FUNC(sc);
	int i;

	/* Disable the function in the FW */
	REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	FOR_EACH_ETH_QUEUE(sc, i) {
		fp = &sc->fp[i];
		REG_WR8(sc, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
			SB_DISABLED);
	}

	/* SP SB */
	REG_WR8(sc, BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED);

	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
		REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);
	}

	/* Configure IGU */
	if (sc->devinfo.int_block == INT_BLOCK_HC) {
		REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
		REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
	} else {
		REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

	if (CNIC_LOADED(sc)) {
		/* Disable Timer scan */
		REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0);
		/*
		 * Wait for at least 10ms and up to 2 second for the timers
		 * scan to complete
		 */
		for (i = 0; i < 200; i++) {
			DELAY(10000);
			if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4))
				break;
		}
	}

	/* Clear ILT */
	bnx2x_clear_func_ilt(sc, func);

	/*
	 * Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		ecore_ilt_boundry_init_op(sc, &ilt_cli, 0);
	}

	/* this assumes that reset_port() called before reset_func() */
	if (!CHIP_IS_E1x(sc)) {
		bnx2x_pf_disable(sc);
	}

	sc->dmae_ready = 0;
}
static void bnx2x_release_firmware(struct bnx2x_softc *sc)
{
	rte_free(sc->init_ops);
	rte_free(sc->init_ops_offsets);
	rte_free(sc->init_data);
	rte_free(sc->iro_array);
}
static int bnx2x_init_firmware(struct bnx2x_softc *sc)
{
	uint32_t off[24];
	int len, i;
	uint8_t *p = sc->firmware;

	for (i = 0; i < 24; ++i)
		off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i));

	len = off[0];
	sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_ops)
		goto alloc_failed;
	bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len);

	len = off[2];
	sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_ops_offsets)
		goto alloc_failed;
	bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len);

	len = off[4];
	sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->init_data)
		goto alloc_failed;
	bnx2x_data_to_init_data(p + off[5], sc->init_data, len);

	sc->tsem_int_table_data = p + off[7];
	sc->tsem_pram_data = p + off[9];
	sc->usem_int_table_data = p + off[11];
	sc->usem_pram_data = p + off[13];
	sc->csem_int_table_data = p + off[15];
	sc->csem_pram_data = p + off[17];
	sc->xsem_int_table_data = p + off[19];
	sc->xsem_pram_data = p + off[21];

	len = off[22];
	sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE);
	if (!sc->iro_array)
		goto alloc_failed;
	bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len);

	return 0;

alloc_failed:
	bnx2x_release_firmware(sc);
	return -1;
}
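/*
 * Illustrative sketch (not part of the original driver):
 * bnx2x_init_firmware() above starts by reading a table of 24 big-endian
 * 32-bit words from the head of the firmware image and then treats them as
 * length/offset pairs into the blob.  The helper below only shows that
 * byte-order conversion; the meaning of the individual entries is an
 * assumption of this example, not documented firmware layout.
 */
static __rte_unused void
example_read_be32_header(const uint8_t *blob, uint32_t out[24])
{
	int i;

	for (i = 0; i < 24; i++)
		out[i] = rte_be_to_cpu_32(*((const uint32_t *)blob + i));
}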
static int cut_gzip_prefix(const uint8_t * zbuf, int len)
{
	/* RFC 1952 fixed gzip member header */
#define MIN_PREFIX_SIZE (10)

	int n = MIN_PREFIX_SIZE;
	uint16_t xlen;

	if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) ||
	    len <= MIN_PREFIX_SIZE) {
		return -1;
	}

	/* optional extra fields are present */
	if (zbuf[3] & 0x4) {
		/* skip the 2-byte XLEN field plus XLEN bytes of extra data */
		xlen = (uint16_t)(zbuf[n] | (zbuf[n + 1] << 8));
		n += 2 + xlen;
		if (n > len)
			return -1;
	}

	/* file name is present */
	if (zbuf[3] & 0x8) {
		while ((zbuf[n++] != 0) && (n < len)) ;
	}

	return n;
}
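/*
 * Illustrative usage sketch (not part of the original driver):
 * cut_gzip_prefix() above returns the offset of the raw DEFLATE data inside
 * a gzip member (RFC 1952: fixed 10-byte header, then optional extra-field
 * and file-name sections), or a negative value when the buffer does not
 * look like gzip.  A hypothetical caller:
 */
static __rte_unused const uint8_t *
example_deflate_start(const uint8_t *zbuf, int len)
{
	int begin = cut_gzip_prefix(zbuf, len);

	return (begin > 0) ? (zbuf + begin) : NULL;
}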
static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
{
	int ret;
	int data_begin = cut_gzip_prefix(zbuf, len);

	PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len);

	if (data_begin <= 0) {
		PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix");
		return -1;
	}

	memset(&zlib_stream, 0, sizeof(zlib_stream));
	zlib_stream.next_in = zbuf + data_begin;
	zlib_stream.avail_in = len - data_begin;
	zlib_stream.next_out = sc->gz_buf;
	zlib_stream.avail_out = FW_BUF_SIZE;

	ret = inflateInit2(&zlib_stream, -MAX_WBITS);
	if (ret != Z_OK) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error");
		return ret;
	}

	ret = inflate(&zlib_stream, Z_FINISH);
	if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
		PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret,
			    zlib_stream.msg);
	}

	sc->gz_outlen = zlib_stream.total_out;
	if (sc->gz_outlen & 0x3) {
		PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d",
			    sc->gz_outlen);
	}
	sc->gz_outlen >>= 2;

	inflateEnd(&zlib_stream);

	if (ret == Z_STREAM_END)
		return 0;

	return ret;
}
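/*
 * Illustrative sketch (not part of the original driver): ecore_gunzip()
 * above calls inflateInit2() with a negative window-bits value
 * (-MAX_WBITS), which tells zlib to expect a raw DEFLATE stream with no
 * zlib/gzip wrapper.  A self-contained version of the same call sequence,
 * with placeholder buffer names:
 */
static __rte_unused int
example_raw_inflate(uint8_t *in, uint32_t in_len,
		    uint8_t *out, uint32_t out_len, uint32_t *produced)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = in;
	strm.avail_in = in_len;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
		return -1;

	ret = inflate(&strm, Z_FINISH);
	*produced = strm.total_out;
	inflateEnd(&strm);

	return (ret == Z_STREAM_END) ? 0 : -1;
}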
void
ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
			  uint32_t addr, uint32_t len)
{
	bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
}
void
ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size,
			  uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < size / 4; i++) {
		REG_WR(sc, addr + (i * 4), data[i]);
	}
}
static const char *get_ext_phy_type(uint32_t ext_phy_type)
{
	uint32_t phy_type_idx = ext_phy_type >> 8;
	static const char *types[] =
	    { "DIRECT", "BNX2X-8071", "BNX2X-8072", "BNX2X-8073",
		"BNX2X-8705", "BNX2X-8706", "BNX2X-8726", "BNX2X-8481", "SFX-7101",
		"BNX2X-8727", "BNX2X-8727-NOC", "BNX2X-84823", "NOT_CONN", "FAILURE"
	};

	if (phy_type_idx < 12)
		return types[phy_type_idx];
	else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type)
		return types[12];
	else
		return types[13];
}
static const char *get_state(uint32_t state)
{
	uint32_t state_idx = state >> 12;
	static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD",
		"OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT",
		"CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD",
		"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
		"UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED"
	};

	if (state_idx <= 0xF)
		return states[state_idx];
	else
		return states[0x10];
}
static const char *get_recovery_state(uint32_t state)
{
	static const char *states[] = { "NONE", "DONE", "INIT",
		"WAIT", "FAILED", "NIC_LOADING"
	};
	return states[state];
}
static const char *get_rx_mode(uint32_t mode)
{
	static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI",
		"PROMISC", "MAX_MULTICAST", "ERROR"
	};

	if (mode < 0x4)
		return modes[mode];
	else if (BNX2X_MAX_MULTICAST == mode)
		return modes[4];
	else
		return modes[5];
}
#define BNX2X_INFO_STR_MAX 256
static const char *get_bnx2x_flags(uint32_t flags)
{
	int i = 0;
	static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ",
		"NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ",
		"USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ",
		"SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING "
	};
	static char flag_str[BNX2X_INFO_STR_MAX];
	memset(flag_str, 0, BNX2X_INFO_STR_MAX);

	for (i = 0; i < 5; i++)
		if (flags & (1 << i)) {
			strlcat(flag_str, flag[i], sizeof(flag_str));
			flags ^= (1 << i);
		}
	if (flags) {
		static char unknown[BNX2X_INFO_STR_MAX];
		snprintf(unknown, 32, "Unknown flag mask %x", flags);
		strlcat(flag_str, unknown, sizeof(flag_str));
	}

	return flag_str;
}
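/*
 * Illustrative sketch (not part of the original driver): get_bnx2x_flags()
 * above appends flag names with strlcat() so the result can never overflow
 * its buffer.  A generic variant using only snprintf(), with caller-supplied
 * (placeholder) flag names:
 */
static __rte_unused const char *
example_flags_to_str(uint32_t flags, const char *const names[], int num_names,
		     char *buf, size_t buf_len)
{
	size_t pos = 0;
	int i;

	buf[0] = '\0';
	for (i = 0; i < num_names && pos < buf_len; i++) {
		if (flags & (1u << i))
			pos += snprintf(buf + pos, buf_len - pos, "%s ",
					names[i]);
	}

	return buf;
}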
/* Prints useful adapter info. */
void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
{
	int i = 0;

	PMD_DRV_LOG(INFO, sc, "========================================");
	/* DPDK and Driver versions */
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
		    rte_version());
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
		    bnx2x_pmd_version());
	/* Firmware versions. */
	PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
		    "Firmware",
		    BNX2X_5710_FW_MAJOR_VERSION,
		    BNX2X_5710_FW_MINOR_VERSION,
		    BNX2X_5710_FW_REVISION_VERSION);
	PMD_DRV_LOG(INFO, sc, "%12s : %s",
		    "Bootcode", sc->devinfo.bc_ver_str);
	/* Hardware chip info. */
	PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
	PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
		    (CHIP_METAL(sc) >> 4));
	/* Bus PCIe info. */
	PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Vendor Id",
		    sc->devinfo.vendor_id);
	PMD_DRV_LOG(INFO, sc, "%12s : 0x%x", "Device Id",
		    sc->devinfo.device_id);
	PMD_DRV_LOG(INFO, sc, "%12s : width x%d, ", "Bus PCIe",
		    sc->devinfo.pcie_link_width);
	switch (sc->devinfo.pcie_link_speed) {
	case 1:
		PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps");
		break;
	case 2:
		PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps");
		break;
	case 4:
		PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps");
		break;
	default:
		PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed");
	}
	/* Device features. */
	PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags");
	/* Miscellaneous flags. */
	if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
		PMD_DRV_LOG(INFO, sc, "%18s", "MSI");
		i++;
	}
	if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
		if (i > 0)
			PMD_DRV_LOG(INFO, sc, "|");
		PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X");
		i++;
	}
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
	PMD_DRV_LOG(INFO, sc, "========================================");
}
/* Prints useful device info. */
void bnx2x_print_device_info(struct bnx2x_softc *sc)
{
	__rte_unused uint32_t ext_phy_type;
	uint32_t offset, reg_val;

	PMD_INIT_FUNC_TRACE(sc);
	offset = offsetof(struct shmem_region,
			  dev_info.port_hw_config[0].external_phy_config);
	reg_val = REG_RD(sc, sc->devinfo.shmem_base + offset);
	if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
		ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(reg_val);
	else
		ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(reg_val);

	/* Device features. */
	PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func);
	PMD_DRV_LOG(INFO, sc,
		    "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is",
		    (sc->dmae_ready ? "Ready" : "Not Ready"));
	PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu);
	PMD_DRV_LOG(INFO, sc,
		    "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
	PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
		    sc->link_params.mac_addr[0],
		    sc->link_params.mac_addr[1],
		    sc->link_params.mac_addr[2],
		    sc->link_params.mac_addr[3],
		    sc->link_params.mac_addr[4],
		    sc->link_params.mac_addr[5]);
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
	PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state));
	if (sc->recovery_state)
		PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery",
			    get_recovery_state(sc->recovery_state));

	switch (sc->sp->rss_rdata.rss_mode) {
	case ETH_RSS_MODE_DISABLED:
		PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - None");
		break;
	case ETH_RSS_MODE_REGULAR:
		PMD_DRV_LOG(INFO, sc, "%12s : %s,", "Queues", "RSS mode - Regular");
		PMD_DRV_LOG(INFO, sc, "%16d", sc->num_queues);
		break;
	default:
		PMD_DRV_LOG(INFO, sc, "%12s : %s", "Queues", "RSS mode - Unknown");
		break;
	}

	PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
		    sc->cq_spq_left, sc->eq_spq_left);

	PMD_DRV_LOG(INFO, sc,
		    "%12s : %x", "Switch", sc->link_params.switch_cfg);
	PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
		    sc->pcie_bus, sc->pcie_device);
	PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p",
		    sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
	PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d",
		    PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
}