/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed_dev_api.h"
#include "qed_init_ops.h"
#include "qed_iscsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

static DEFINE_SPINLOCK(qm_lock);

#define QED_MIN_DPIS		(4)
#define QED_MIN_PWM_REGION	(QED_WID_SIZE * QED_MIN_DPIS)

/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);
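
	/* The register presumably encodes log2(BAR size in bytes) - 15,
	 * so e.g. a read value of 4 yields 1 << 19 = 512kB.
	 */
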
	/* Old MFW initialized the above registers only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	}

	DP_INFO(p_hwfn,
		"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
	return 512 * 1024;
}

void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
		}
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
	}
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	bool init_rdma_offload_pq = false;
	bool init_pure_ack_pq = false;
	bool init_ooo_pq = false;
	u16 num_pqs, multi_cos_tcs = 1;
	u8 pf_wfq = qm_info->pf_wfq;
	u32 pf_rl = qm_info->pf_rl;
	u16 num_vfs = 0;
	u32 num_pf_rls = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		num_pqs++;	/* for RoCE queue */
		init_rdma_offload_pq = true;
		/* we subtract num_vfs because each requires a rate limiter,
		 * and one default rate limiter
		 */
		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
			num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;

		num_pqs += num_pf_rls;
		qm_info->num_pf_rls = (u8)num_pf_rls;
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
		init_pure_ack_pq = true;
		init_ooo_pq = true;
	}

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
	 */
	qm_info->qm_pq_params = kcalloc(num_pqs,
					sizeof(struct init_qm_pq_params),
					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(num_vports,
					   sizeof(struct init_qm_vport_params),
					   b_sleepable ? GFP_KERNEL
						       : GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
					  sizeof(struct init_qm_port_params),
					  b_sleepable ? GFP_KERNEL
						      : GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init rate limited queues */
	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
	}

	/* Then init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue++];

		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
		    p_hwfn->hw_info.personality == QED_PCI_ETH) {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.non_offload_tc;
			params->wrr_group = 1;
		} else {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.offload_tc;
			params->wrr_group = 1;
		}
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8)RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	if (init_rdma_offload_pq) {
		qm_info->offload_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_pure_ack_pq) {
		qm_info->pure_ack_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_ooo_pq) {
		qm_info->ooo_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;
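
	/* At this point the PQ array is laid out, in order: the rate-limited
	 * PQs (when DCQCN is enabled), the per-TC PQs, the pure-LB PQ, the
	 * optional RDMA-offload / pure-ACK / OOO PQs, and finally one PQ per
	 * VF starting at vf_offset.
	 */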

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		if (num_ports == 4)
			p_qm_port->active_phys_tcs = 0x7;
		else
			p_qm_port->active_phys_tcs = 0x9f;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;
	qm_info->pf_rl = pf_rl;
	qm_info->pf_wfq = pf_wfq;

	return 0;

alloc_err:
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
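
/* qed_init_qm_info() takes a b_sleepable flag since it is reached both from
 * process context (qed_resc_alloc()) and - presumably via the DCBx attention
 * path - from contexts where sleeping is not allowed (qed_qm_reconf() passes
 * false), hence the GFP_KERNEL vs. GFP_ATOMIC choice above.
 */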

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* qm_info is allocated in qed_init_qm_info() which is already called
	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
	 * The allocated size may change each init, so we free it before next
	 * allocation.
	 */
	qed_qm_info_free(p_hwfn);

	/* initialize qed's qm data structure */
	rc = qed_init_qm_info(p_hwfn, false);
	if (rc)
		return rc;

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
	struct qed_ll2_info *p_ll2_info;
#endif
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn, true);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       PROTOCOLID_ROCE,
							       NULL) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_ISCSI,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		p_eq = qed_eq_alloc(p_hwfn, (u16)n_eqes);
		if (!p_eq)
			goto alloc_no_mem;
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq)
			goto alloc_no_mem;
		p_hwfn->p_consq = p_consq;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			p_ll2_info = qed_ll2_alloc(p_hwfn);
			if (!p_ll2_info)
				goto alloc_no_mem;
			p_hwfn->p_ll2_info = p_ll2_info;
		}
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			p_iscsi_info = qed_iscsi_alloc(p_hwfn);
			if (!p_iscsi_info)
				goto alloc_no_mem;
			p_hwfn->p_iscsi_info = p_iscsi_info;
			p_ooo_info = qed_ooo_alloc(p_hwfn);
			if (!p_ooo_info)
				goto alloc_no_mem;
			p_hwfn->p_ooo_info = p_ooo_info;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2)
			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
		}
	}
}

#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
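
/* The 'command' built above is an SDM op-gen descriptor: an aggregative
 * interrupt completion whose index and vector bit identify the function being
 * cleaned; firmware acknowledges by writing a non-zero value to the polled
 * USTORM RAM address.
 */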

static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_B0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);
}
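
/* hw_mode is later consumed by the init tool to select mode-dependent init
 * commands; e.g. a two-hwfn (CMT) device on a single-port engine would carry
 * MODE_BB_B0 | MODE_PORTS_PER_ENG_1 | MODE_MF_SI | MODE_ASIC | MODE_100G.
 */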

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id, 0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
		}
	}
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	u8 vf_id;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (QED_IS_BB(p_hwfn->cdev)) {
		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}
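
/* qed_fid_pretend() redirects subsequent register accesses through this PTT
 * to another function ID, which is how the loop above can program per-VF CFC
 * enables before any VF is instantiated; each block ends by pretending back
 * to the original PF.
 */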

static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
	u32 dpi_bit_shift, dpi_count;
	u32 min_dpis;

	/* Calculate DPI size */
	dpi_page_size_1 = QED_WID_SIZE * n_cpus;
	dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
	dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
	dpi_page_size = roundup_pow_of_two(dpi_page_size);
	dpi_bit_shift = ilog2(dpi_page_size / 4096);

	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);

	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	if (dpi_count < min_dpis)
		return -EINVAL;

	return 0;
}
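
/* Worked example (assuming 4kB pages and a 1kB QED_WID_SIZE): with 8 CPUs,
 * dpi_page_size_1 = 8kB and dpi_page_size_2 = 4kB, so each DPI spans 8kB and
 * dpi_bit_shift = 1; a 512kB PWM region then provides 64 DPIs.
 */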

enum QED_ROCE_EDPM_MODE {
	QED_ROCE_EDPM_MODE_ENABLE = 0,
	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
	QED_ROCE_EDPM_MODE_DISABLE = 2,
};

static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	int rc = 0;
	u8 cond;

	db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
	if (p_hwfn->cdev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions */
	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						   NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->cdev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return -EINVAL;
	}

	if (pwm_regsize < QED_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->cdev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize,
		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
		return -EINVAL;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
	    ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
		/* Either EDPM is mandatory, or we are attempting to allocate
		 * a WID per CPU.
		 */
		n_cpus = num_active_cpus();
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
		 */
		n_cpus = 1;
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

		if (cond)
			qed_rdma_dpm_bar(p_hwfn, p_ptt);
	}

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
		norm_regsize,
		pwm_regsize,
		p_hwfn->dpi_size,
		p_hwfn->dpi_count,
		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		"disabled" : "enabled");

	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
		return -EINVAL;
	}

	p_hwfn->dpi_start_offset = norm_regsize;

	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return 0;
}
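
/* The doorbell BAR thus ends up split into a page-aligned "normal" region,
 * sized per the non-PWM (core/ethernet) connections, followed by the PWM
 * region whose DPIs serve RoCE EDPM doorbells.
 */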

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, int hw_mode)
{
	return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
			    p_hwfn->port_id, hw_mode);
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & BIT(MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & BIT(MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param, drv_mb_param;
	bool b_default_mtu = true;
	struct qed_hwfn *p_hwfn;
	int rc = 0, mfw_rc, i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, bin_fw_data);
		if (rc)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);
) {
1107 case FW_MSG_CODE_DRV_LOAD_ENGINE
:
1108 rc
= qed_hw_init_common(p_hwfn
, p_hwfn
->p_main_ptt
,
1109 p_hwfn
->hw_info
.hw_mode
);
1113 case FW_MSG_CODE_DRV_LOAD_PORT
:
1114 rc
= qed_hw_init_port(p_hwfn
, p_hwfn
->p_main_ptt
,
1115 p_hwfn
->hw_info
.hw_mode
);
1120 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
1121 rc
= qed_hw_init_pf(p_hwfn
, p_hwfn
->p_main_ptt
,
1122 p_tunn
, p_hwfn
->hw_info
.hw_mode
,
1123 b_hw_start
, int_mode
,
1124 allow_npar_tx_switch
);
1133 "init phase failed for loadcode 0x%x (rc %d)\n",
1136 /* ACK mfw regardless of success or failure of initialization */
1137 mfw_rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1138 DRV_MSG_CODE_LOAD_DONE
,
1139 0, &load_code
, ¶m
);
1143 DP_NOTICE(p_hwfn
, "Failed sending LOAD_DONE command\n");
1147 /* send DCBX attention request command */
1150 "sending phony dcbx set command to trigger DCBx attention handling\n");
1151 mfw_rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1152 DRV_MSG_CODE_SET_DCBX
,
1153 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT
,
1154 &load_code
, ¶m
);
1157 "Failed to send DCBX attention request\n");
1161 p_hwfn
->hw_init_done
= true;
1165 p_hwfn
= QED_LEADING_HWFN(cdev
);
1166 drv_mb_param
= (FW_MAJOR_VERSION
<< 24) |
1167 (FW_MINOR_VERSION
<< 16) |
1168 (FW_REVISION_VERSION
<< 8) |
1169 (FW_ENGINEERING_VERSION
);
1170 rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1171 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER
,
1172 drv_mb_param
, &load_code
, ¶m
);
1174 DP_INFO(p_hwfn
, "Failed to update firmware version\n");
1176 if (!b_default_mtu
) {
1177 rc
= qed_mcp_ov_update_mtu(p_hwfn
, p_hwfn
->p_main_ptt
,
1178 p_hwfn
->hw_info
.mtu
);
1181 "Failed to update default mtu\n");
1184 rc
= qed_mcp_ov_update_driver_state(p_hwfn
,
1186 QED_OV_DRIVER_STATE_DISABLED
);
1188 DP_INFO(p_hwfn
, "Failed to update driver state\n");
1190 rc
= qed_mcp_ov_update_eswitch(p_hwfn
, p_hwfn
->p_main_ptt
,
1191 QED_OV_ESWITCH_VEB
);
1193 DP_INFO(p_hwfn
, "Failed to update eswitch mode\n");

#define QED_HW_STOP_RETRY_LIMIT (10)
static void qed_hw_timers_stop(struct qed_dev *cdev,
			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}

int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	if (IS_PF(cdev)) {
		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
					   cdev->hwfns[0].p_main_ptt, false);
		if (t_rc != 0)
			rc = t_rc;
	}

	return rc;
}
*cdev
)
1307 for_each_hwfn(cdev
, j
) {
1308 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[j
];
1309 struct qed_ptt
*p_ptt
= p_hwfn
->p_main_ptt
;
1312 qed_vf_pf_int_cleanup(p_hwfn
);
1317 NETIF_MSG_IFDOWN
, "Shutting down the fastpath\n");
1319 qed_wr(p_hwfn
, p_ptt
,
1320 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF
, 0x1);
1322 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_TCP
, 0x0);
1323 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_UDP
, 0x0);
1324 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_FCOE
, 0x0);
1325 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_ROCE
, 0x0);
1326 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_OPENFLOW
, 0x0);
1328 qed_int_igu_init_pure_rt(p_hwfn
, p_ptt
, false, false);
1330 /* Need to wait 1ms to guarantee SBs are cleared */
1331 usleep_range(1000, 2000);

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 reg, bool expected)
{
	u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	u32 wol_param;
	int i;

	switch (cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(cdev,
			  "Unknown WoL configuration %02x\n", cdev->wol_config);
		/* Fallthrough */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc)
				return rc;
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ, wol_param,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
						 PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
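
/* The concrete FID read above packs the function's identity into one dword;
 * bits [19:16] carry the absolute PF id, while PXP_CONCRETE_FID_PFID and
 * PXP_CONCRETE_FID_PORT select the relative-PF and port fields. E.g. a
 * concrete FID of 0x00030000 would map to abs_pf_id 3.
 */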

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	struct qed_sb_cnt_info sb_cnt_info;
	int num_features = 1;

	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 / RoCE but with
		 * consideration as to how many l2 queues / cnqs we have.
		 */
		num_features++;

		feat_num[QED_RDMA_CNQ] =
			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
	}

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
					num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));

	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
	feat_num[QED_VF_L2_QUE] =
		min_t(u32,
		      RESC_NUM(p_hwfn, QED_L2_QUEUE) -
		      FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
		   RESC_NUM(p_hwfn, QED_SB), num_features);
}

static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
				    enum qed_resources res_id)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	struct qed_sb_cnt_info sb_cnt_info;
	u32 dflt_resc_num = 0;

	switch (res_id) {
	case QED_SB:
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		dflt_resc_num = sb_cnt_info.sb_cnt;
		break;
	case QED_L2_QUEUE:
		dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
		break;
	case QED_VPORT:
		dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
		break;
	case QED_RSS_ENG:
		dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
		break;
	case QED_PQ:
		/* The granularity of the PQs is 8 */
		dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
		dflt_resc_num &= ~0x7;
		break;
	case QED_RL:
		dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
		dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
		break;
	case QED_LL2_QUEUE:
		dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
		break;
	default:
		break;
	}

	return dflt_resc_num;
}

static const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
	switch (res_id) {
	case QED_SB:
		return "SB";
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_QUEUE:
		return "LL2_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	default:
		return "UNKNOWN_RESOURCE";
	}
}

static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
				enum qed_resources res_id)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
	u32 *p_resc_num, *p_resc_start;
	struct resource_info resc_info;
	int rc;

	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	/* Default values assume that each function received equal share */
	dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
	if (!dflt_resc_num) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return -EINVAL;
	}
	dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;

	memset(&resc_info, 0, sizeof(resc_info));
	resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
	if (resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return -EINVAL;
	}

	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
				   &mcp_resp, &mcp_param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
	    mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
		DP_INFO(p_hwfn,
			"Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
			res_id,
			qed_hw_get_resc_name(res_id),
			mcp_resp, dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
		goto out;
	}

	/* Special handling for status blocks; Would be revised in future */
	if (res_id == QED_SB) {
		resc_info.size -= 1;
		resc_info.offset -= p_hwfn->enabled_func_idx;
	}

	*p_resc_num = resc_info.size;
	*p_resc_start = resc_info.offset;

out:
	/* PQs have to divide by 8 [that's the HW granularity].
	 * Reduce number so it would fit.
	 */
	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num,
			(*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return 0;
}

static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		rc = qed_hw_set_resc_info(p_hwfn, res_id);
		if (rc)
			return rc;
	}

	/* Sanity for ILT */
	if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n");
	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;
}

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

	link_temp = link->speed.advertised_speeds;
	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;

	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  port_mode);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	if (qed_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	return qed_hw_get_resc(p_hwfn);
}

static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);

	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		goto err0;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		goto err2;

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}
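
/* On CMT (100G) devices the two hw-functions split each PCI BAR in half,
 * which is why the second engine's regview/doorbell windows above start at
 * qed_hw_bar_size() / 2.
 */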

void qed_hw_remove(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	if (IS_PF(cdev))
		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct qed_chain_next *p_next;
	u32 size, i;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}
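/* Note on the walk above: each page's next-ptr element sits right after
 * the usable elements (elem_size * usable_per_page bytes into the page),
 * so next_virt/next_phys must be sampled before the page holding them is
 * handed back to dma_free_coherent().
 */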
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_chain->p_virt_addr, p_chain->p_phys_addr);
}
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_pbl_virt)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  pp_virt_addr_tbl[i],
				  *(dma_addr_t *)p_pbl_virt);

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	dma_free_coherent(&cdev->pdev->dev,
			  pbl_size,
			  p_chain->pbl_sp.p_virt_table,
			  p_chain->pbl_sp.p_phys_table);
out:
	vfree(p_chain->pbl.pp_virt_addr_tbl);
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, p_chain);
		break;
	}
}
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
	     chain_size > 0x10000) ||
	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
	     chain_size > 0x100000000ULL)) {
		DP_NOTICE(cdev,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  chain_size);
		return -EINVAL;
	}

	return 0;
}
static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void *p_virt = NULL, *p_virt_prev = NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		} else {
			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						     p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				     p_chain->p_virt_addr,
				     p_chain->p_phys_addr);

	return 0;
}
static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);

	return 0;
}
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	void **pp_virt_addr_tbl = NULL;
	u8 *p_pbl_virt = NULL;
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = vzalloc(size);
	if (!pp_virt_addr_tbl)
		return -ENOMEM;

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
					size, &p_pbl_phys, GFP_KERNEL);
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);
	if (!p_pbl_virt)
		return -ENOMEM;

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	return 0;
}
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
	u32 page_cnt;
	int rc = 0;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
			      mode, cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, p_chain);
		break;
	}
	if (rc)
		goto nomem;

	return 0;

nomem:
	qed_chain_free(cdev, p_chain);
	return rc;
}
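/* Illustrative usage from a hypothetical caller (the element type and
 * count below are examples, not taken from this file):
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     1024, sizeof(union eth_rx_cqe), &chain);
 *	if (!rc) {
 *		... use the chain ...
 *		qed_chain_free(cdev, &chain);
 *	}
 */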
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}
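/* Example of the mapping above (numbers are illustrative): if
 * RESC_START(p_hwfn, QED_L2_QUEUE) is 64 and RESC_NUM() is 16, a relative
 * src_id of 3 maps to absolute queue 67, while src_id 16 is rejected as
 * out of range.
 */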
int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}
static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
				  u8 *p_filter)
{
	*p_high = p_filter[1] | (p_filter[0] << 8);
	*p_low = p_filter[5] | (p_filter[4] << 8) |
		 (p_filter[3] << 16) | (p_filter[2] << 24);
}
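/* Example of the packing above: for MAC 00:0e:1e:11:22:33,
 * p_high = 0x0000000e (bytes 0-1) and p_low = 0x1e112233 (bytes 2-5),
 * matching the high/low dword layout of the NIG LLH filter registers.
 */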
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "mac: %pM is added at %d\n",
		   p_filter, i);

	return 0;
}
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "mac: %pM is removed from %d\n",
			   p_filter, i);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_coal_timeset, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}
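/* Worked example of the timeset math above (illustrative): coalesce =
 * 0x1F0 usec falls in the <= 0x1FF bucket, so timer_res = 2 and
 * timeset = 0x1F0 >> 2 = 0x7C, which fits the 7-bit TIMESET field.
 */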
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
						min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
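/* Worked example of the formula above (illustrative, assuming
 * QED_WFQ_UNIT is 100): with min_pf_rate = 10000 Mbps and a vport
 * min_speed of 500 Mbps, vport_wfq = (500 * 100) / 10000 = 5, i.e. the
 * vport is guaranteed roughly 5% of the PF minimum rate.
 */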
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	p_hwfn->qm_info.wfq_data->default_min_speed = min_pf_rate;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
/* This function performs several validations for WFQ
 * configuration and required min rate for a given vport
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}
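/* Numeric sketch of the checks above (illustrative): with min_pf_rate =
 * 10000 Mbps and two of four vports explicitly requesting 3000 and 4000
 * Mbps, the remaining 3000 Mbps is split between the two non-requesting
 * vports (1500 Mbps each), which clears the one-percent floor of 100
 * Mbps, so the configuration is accepted.
 */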
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (!rc)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps needs to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev, NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev, NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn,
						      p_hwfn->p_dpc_ptt,
						      min_pf_rate);
	}
}
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
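/* Example (illustrative): line_speed = 40000 Mbps and min_bw = 25 set
 * p_link->min_pf_rate to 10000 Mbps, which then serves as the base rate
 * for the per-vport WFQ guarantees configured above.
 */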
/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}