/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/* API common to all protocols */
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	int i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

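/* For illustration only - a typical caller (e.g. the module probe path)
 * would propagate debug module parameters into these fields roughly as
 * below; the parameter names are hypothetical:
 *
 *	qed_init_dp(cdev, dp_module_param, dp_level_param);
 *
 * Every hwfn then inherits the same dp_module mask and dp_level verbosity,
 * so DP_VERBOSE() filtering behaves identically on both engines.
 */
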
void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

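/* cache_shift is a power-of-two exponent; consumers derive the actual
 * alignment as 1 << cache_shift, e.g. (illustrative only):
 *
 *	alignment = 1 << cdev->cache_shift;	// 1 << 7 == 128 bytes
 */
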
static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
	}
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	u8 num_vports, i, vport_id, num_ports;
	u16 num_pqs, multi_cos_tcs = 1;

	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
	 */
	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
					num_pqs, GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
					   num_vports, GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
					  MAX_NUM_PORTS, GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

		params->vport_id = vport_id;
		params->tc_id = p_hwfn->hw_info.non_offload_tc;
		params->wrr_group = 1;
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = i;
	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[i].wrr_group = 1;

	qm_info->offload_pq = 0;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		p_qm_port->num_active_phys_tcs = 4;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

	qm_info->vport_rl_en = 1;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	kfree(qm_info->qm_pq_params);
	kfree(qm_info->qm_vport_params);
	kfree(qm_info->qm_port_params);
	return -ENOMEM;
}

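/* Worked example of the PQ layout built above (illustrative only): with
 * multi_cos_tcs == 1, num_pqs == 2 and the array is laid out as
 *
 *	qm_pq_params[0] - per-TC PQ (tc_id = non_offload_tc)
 *	qm_pq_params[1] - pure-LB PQ (tc_id = PURE_LB_TC), pure_lb_pq == 1
 *
 * Both entries share the vport obtained from RESC_START(p_hwfn, QED_VPORT).
 */
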
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
			      RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
			      RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	qed_resc_free(cdev);
	return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}

#define FINAL_CLEANUP_CMD_OFFSET	(0)
#define FINAL_CLEANUP_CMD		(0x1)
#define FINAL_CLEANUP_VALID_OFFSET	(6)
#define FINAL_CLEANUP_VFPF_ID_SHIFT	(7)
#define FINAL_CLEANUP_COMP		(0x2)
#define FINAL_CLEANUP_POLL_CNT		(100)
#define FINAL_CLEANUP_POLL_TIME		(10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u16 id)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;

	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}

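/* Layout of the cleanup command word built above (illustrative only):
 * bit 0 carries FINAL_CLEANUP_CMD, bit 6 the valid flag, bits 7+ the
 * VF/PF id, and SDM_OP_GEN_COMP_TYPE_SHIFT selects the completion type.
 * For example, id == 3 yields:
 *
 *	command = 0x1 | (1 << 6) | (3 << 7)
 *		  | (0x2 << SDM_OP_GEN_COMP_TYPE_SHIFT);
 */
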
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_A0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case SF:
		hw_mode |= 1 << MODE_SF;
		break;
	case MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	case MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
		hw_mode |= 1 << MODE_SF;
	}

	hw_mode |= 1 << MODE_ASIC;

	p_hwfn->hw_info.hw_mode = hw_mode;
}

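/* hw_mode is a bitmask of MODE_* flags, so a single-function, 2-port A0
 * ASIC would end up with (illustrative only):
 *
 *	hw_mode = (1 << MODE_BB_A0) | (1 << MODE_PORTS_PER_ENG_2) |
 *		  (1 << MODE_SF) | (1 << MODE_ASIC);
 */
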
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			  hw_mode);

	return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}

	return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	struct qed_storm_stats *p_stat;
	u32 load_code, param, *p_address;
	int rc, mfw_rc, i;
	u8 fw_vport = 0;

	rc = qed_init_fw_data(cdev, bin_fw_data);
	if (rc != 0)
		return rc;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
		if (rc != 0)
			return rc;

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall through */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;

		p_stat = &p_hwfn->storm_stats;
		p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
					 MSTORM_QUEUE_STAT_OFFSET(fw_vport);
		p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);

		p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
					 USTORM_QUEUE_STAT_OFFSET(fw_vport);
		p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);

		p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
					 PSTORM_QUEUE_STAT_OFFSET(fw_vport);
		p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);

		p_address = &p_stat->tstats.address;
		*p_address = BAR0_MAP_REG_TSDM_RAM +
			     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
	}

	return 0;
}

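/* The switch above relies on fall-through: the MFW grants the first
 * function on an engine FW_MSG_CODE_DRV_LOAD_ENGINE, the first on a port
 * ..._LOAD_PORT, and every other function ..._LOAD_FUNCTION, so an
 * engine-level load runs all three init phases, a port-level load the last
 * two, and a function-level load only qed_hw_init_pf().
 */
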
#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc, i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			return rc;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	/* Disable DMAE in PXP - in CMT, this should only be done for
	 * first hw-function, and only after all transactions have
	 * stopped for all active hw-functions.
	 */
	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
				   cdev->hwfns[0].p_main_ptt,
				   false);
	if (t_rc != 0)
		rc = t_rc;

	return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	int rc = 0;

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		return rc;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	return rc;
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}

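/* Example decode (illustrative values only): a concrete_fid of 0x00030000
 * yields abs_pf_id == 3 from bits [19:16], while rel_pf_id and port_id are
 * extracted with GET_FIELD() using the PXP_CONCRETE_FID_* masks.
 */
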
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
					num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	int num_funcs, i;

	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
				  : p_hwfn->cdev->num_ports_in_engines;

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 qed_int_get_num_sbs(p_hwfn, NULL));
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}

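/* Example split (illustrative only): in MF mode num_funcs is
 * MAX_NUM_PFS_BB, so each PF receives a 1/num_funcs slice of every global
 * pool, and resc_start[i] places PF n at slice n, e.g.:
 *
 *	resc_num[QED_VPORT]   = MAX_NUM_VPORTS_BB / num_funcs;
 *	resc_start[QED_VPORT] = resc_num[QED_VPORT] * p_hwfn->rel_pf_id;
 */
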
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Read Vendor Id / Device Id */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, pci_id);
	p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
				    NVM_CFG1_GLOB_VENDOR_ID_MASK;

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
	       offsetof(struct nvm_cfg1_func, device_id);
	val = qed_rd(p_hwfn, p_ptt, addr);

	if (IS_MF(p_hwfn)) {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
	} else {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
		link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
		p_hwfn->cdev->mf_mode = SF;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_hw_get_resc(p_hwfn);

	return rc;
}

static void qed_get_dev_info(struct qed_dev *cdev)
{
	u32 tmp;

	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	/* Learn number of HW-functions */
	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	rc = qed_hw_hwfn_prepare(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id)
		qed_get_dev_info(p_hwfn->cdev);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

static u32
qed_hw_bar_size(struct qed_hwfn *p_hwfn,
		u8 bar_id)
{
	u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
		       : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

	/* Get the BAR size(in KB) from hardware given val */
	return 1 << (val + 15);
}

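/* The register encodes the BAR size as a power of two above 32K, so
 * (illustrative only) val == 0 maps to 1 << 15 == 32K and val == 5 to
 * 1 << 20 == 1M.
 */
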
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn, cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			qed_init_free(p_hwfn);
			qed_mcp_free(p_hwfn);
			qed_hw_hwfn_free(p_hwfn);
		}
	}

	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}
}

int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    u16 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain)
{
	dma_addr_t p_pbl_phys = 0;
	void *p_pbl_virt = NULL;
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;
	u16 page_cnt = 0;
	size_t size;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	size = page_cnt * QED_CHAIN_PAGE_SIZE;
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
		goto nomem;
	}

	if (mode == QED_CHAIN_MODE_PBL) {
		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys,
						GFP_KERNEL);
		if (!p_pbl_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
			goto nomem;
		}

		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
				   (u8)elem_size, intended_use,
				   p_pbl_phys, p_pbl_virt);
	} else {
		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
			       (u8)elem_size, intended_use, mode);
	}

	return 0;

nomem:
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PAGE_SIZE,
			  p_virt, p_phys);
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
			  p_pbl_virt, p_pbl_phys);

	return -ENOMEM;
}

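/* For illustration only - a hypothetical caller allocating a 256-element
 * PBL-mode producer/consumer chain of 8-byte elements might do:
 *
 *	struct qed_chain chain;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256, 8, &chain);
 *	...
 *	qed_chain_free(cdev, &chain);
 */
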
void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain)
{
	size_t size;

	if (!p_chain->p_virt_addr)
		return;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		dma_free_coherent(&cdev->pdev->dev, size,
				  p_chain->pbl.p_virt_table,
				  p_chain->pbl.p_phys_table);
	}

	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
	dma_free_coherent(&cdev->pdev->dev, size,
			  p_chain->p_virt_addr,
			  p_chain->p_phys_addr);
}

static void __qed_get_vport_stats(struct qed_dev *cdev,
				  struct qed_eth_stats *stats)
{
	int i, j;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct tstorm_per_port_stat tstats;
		struct port_stats port_stats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		qed_memcpy_from(p_hwfn, p_ptt, &mstats,
				p_hwfn->storm_stats.mstats.address,
				p_hwfn->storm_stats.mstats.len);

		memset(&ustats, 0, sizeof(ustats));
		qed_memcpy_from(p_hwfn, p_ptt, &ustats,
				p_hwfn->storm_stats.ustats.address,
				p_hwfn->storm_stats.ustats.len);

		memset(&pstats, 0, sizeof(pstats));
		qed_memcpy_from(p_hwfn, p_ptt, &pstats,
				p_hwfn->storm_stats.pstats.address,
				p_hwfn->storm_stats.pstats.len);

		memset(&tstats, 0, sizeof(tstats));
		qed_memcpy_from(p_hwfn, p_ptt, &tstats,
				p_hwfn->storm_stats.tstats.address,
				p_hwfn->storm_stats.tstats.len);

		memset(&port_stats, 0, sizeof(port_stats));

		if (p_hwfn->mcp_info)
			qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
					p_hwfn->mcp_info->port_addr +
					offsetof(struct public_port, stats),
					sizeof(port_stats));
		qed_ptt_release(p_hwfn, p_ptt);

		stats->no_buff_discards +=
			HILO_64_REGPAIR(mstats.no_buff_discard);
		stats->packet_too_big_discard +=
			HILO_64_REGPAIR(mstats.packet_too_big_discard);
		stats->ttl0_discard +=
			HILO_64_REGPAIR(mstats.ttl0_discard);
		stats->tpa_coalesced_pkts +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
		stats->tpa_coalesced_events +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_events);
		stats->tpa_aborts_num +=
			HILO_64_REGPAIR(mstats.tpa_aborts_num);
		stats->tpa_coalesced_bytes +=
			HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);

		stats->rx_ucast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
		stats->rx_mcast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
		stats->rx_bcast_bytes +=
			HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
		stats->rx_ucast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
		stats->rx_mcast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
		stats->rx_bcast_pkts +=
			HILO_64_REGPAIR(ustats.rcv_bcast_pkts);

		stats->mftag_filter_discards +=
			HILO_64_REGPAIR(tstats.mftag_filter_discard);
		stats->mac_filter_discards +=
			HILO_64_REGPAIR(tstats.eth_mac_filter_discard);

		stats->tx_ucast_bytes +=
			HILO_64_REGPAIR(pstats.sent_ucast_bytes);
		stats->tx_mcast_bytes +=
			HILO_64_REGPAIR(pstats.sent_mcast_bytes);
		stats->tx_bcast_bytes +=
			HILO_64_REGPAIR(pstats.sent_bcast_bytes);
		stats->tx_ucast_pkts +=
			HILO_64_REGPAIR(pstats.sent_ucast_pkts);
		stats->tx_mcast_pkts +=
			HILO_64_REGPAIR(pstats.sent_mcast_pkts);
		stats->tx_bcast_pkts +=
			HILO_64_REGPAIR(pstats.sent_bcast_pkts);
		stats->tx_err_drop_pkts +=
			HILO_64_REGPAIR(pstats.error_drop_pkts);
		stats->rx_64_byte_packets += port_stats.pmm.r64;
		stats->rx_127_byte_packets += port_stats.pmm.r127;
		stats->rx_255_byte_packets += port_stats.pmm.r255;
		stats->rx_511_byte_packets += port_stats.pmm.r511;
		stats->rx_1023_byte_packets += port_stats.pmm.r1023;
		stats->rx_1518_byte_packets += port_stats.pmm.r1518;
		stats->rx_1522_byte_packets += port_stats.pmm.r1522;
		stats->rx_2047_byte_packets += port_stats.pmm.r2047;
		stats->rx_4095_byte_packets += port_stats.pmm.r4095;
		stats->rx_9216_byte_packets += port_stats.pmm.r9216;
		stats->rx_16383_byte_packets += port_stats.pmm.r16383;
		stats->rx_crc_errors += port_stats.pmm.rfcs;
		stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
		stats->rx_pause_frames += port_stats.pmm.rxpf;
		stats->rx_pfc_frames += port_stats.pmm.rxpp;
		stats->rx_align_errors += port_stats.pmm.raln;
		stats->rx_carrier_errors += port_stats.pmm.rfcr;
		stats->rx_oversize_packets += port_stats.pmm.rovr;
		stats->rx_jabbers += port_stats.pmm.rjbr;
		stats->rx_undersize_packets += port_stats.pmm.rund;
		stats->rx_fragments += port_stats.pmm.rfrg;
		stats->tx_64_byte_packets += port_stats.pmm.t64;
		stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
		stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
		stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
		stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
		stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
		stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
		stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
		stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
		stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
		stats->tx_pause_frames += port_stats.pmm.txpf;
		stats->tx_pfc_frames += port_stats.pmm.txpp;
		stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
		stats->tx_total_collisions += port_stats.pmm.tncl;
		stats->rx_mac_bytes += port_stats.pmm.rbyte;
		stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
		stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
		stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
		stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
		stats->tx_mac_bytes += port_stats.pmm.tbyte;
		stats->tx_mac_uc_packets += port_stats.pmm.txuca;
		stats->tx_mac_mc_packets += port_stats.pmm.txmca;
		stats->tx_mac_bc_packets += port_stats.pmm.txbca;
		stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;

		for (j = 0; j < 8; j++) {
			stats->brb_truncates += port_stats.brb.brb_truncate[j];
			stats->brb_discards += port_stats.brb.brb_discard[j];
		}
	}
}

void qed_get_vport_stats(struct qed_dev *cdev,
			 struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	__qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

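/* The subtraction treats qed_eth_stats as a flat array of u64 counters, so
 * every field is reduced by the snapshot taken at the last reset, e.g.
 * (illustrative only) a HW counter of 1000 with a baseline of 400 is
 * reported as 600.
 */
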
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

		if (!p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.mstats.address,
			      &mstats,
			      p_hwfn->storm_stats.mstats.len);

		memset(&ustats, 0, sizeof(ustats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.ustats.address,
			      &ustats,
			      p_hwfn->storm_stats.ustats.len);

		memset(&pstats, 0, sizeof(pstats));
		qed_memcpy_to(p_hwfn, p_ptt,
			      p_hwfn->storm_stats.pstats.address,
			      &pstats,
			      p_hwfn->storm_stats.pstats.len);

		qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		__qed_get_vport_stats(cdev, cdev->reset_stats);
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);
		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

*p_hwfn
,
1777 u8 src_id
, u8
*dst_id
)
1779 if (src_id
>= RESC_NUM(p_hwfn
, QED_VPORT
)) {
1782 min
= (u8
)RESC_START(p_hwfn
, QED_VPORT
);
1783 max
= min
+ RESC_NUM(p_hwfn
, QED_VPORT
);
1785 "vport id [%d] is not valid, available indices [%d - %d]\n",
1791 *dst_id
= RESC_START(p_hwfn
, QED_VPORT
) + src_id
;
1796 int qed_fw_rss_eng(struct qed_hwfn
*p_hwfn
,
1797 u8 src_id
, u8
*dst_id
)
1799 if (src_id
>= RESC_NUM(p_hwfn
, QED_RSS_ENG
)) {
1802 min
= (u8
)RESC_START(p_hwfn
, QED_RSS_ENG
);
1803 max
= min
+ RESC_NUM(p_hwfn
, QED_RSS_ENG
);
1805 "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1811 *dst_id
= RESC_START(p_hwfn
, QED_RSS_ENG
) + src_id
;