/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static DEFINE_SPINLOCK(qm_lock);

#define QED_MIN_DPIS		(4)
#define QED_MIN_PWM_REGION	(QED_WID_SIZE * QED_MIN_DPIS)

/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};
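/* The PGLUE_B_REG_PF_BAR{0,1}_SIZE registers used below hold a power-of-two
 * encoding of the BAR size: qed_hw_bar_size() decodes it as 1 << (val + 15),
 * so an encoded value of 1 means 64kB, 2 means 128kB, and so on. An encoded
 * value of 0 means the MFW never configured the register and a fallback
 * size must be assumed.
 */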
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}
void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}
static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
		}
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
	}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	bool init_rdma_offload_pq = false;
	bool init_pure_ack_pq = false;
	bool init_ooo_pq = false;
	u16 num_pqs, multi_cos_tcs = 1;
	u8 pf_wfq = qm_info->pf_wfq;
	u32 pf_rl = qm_info->pf_rl;
	u16 num_pf_rls = 0;
	u16 num_vfs = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		num_pqs++;	/* for RoCE queue */
		init_rdma_offload_pq = true;
		/* we subtract num_vfs because each requires a rate limiter,
		 * and one more is needed for the default rate limiter
		 */
		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
			num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;

		num_pqs += num_pf_rls;
		qm_info->num_pf_rls = (u8) num_pf_rls;
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
		init_pure_ack_pq = true;
		init_ooo_pq = true;
	}

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQs, then the
	 * pure-LB queue.
	 */
	qm_info->qm_pq_params = kcalloc(num_pqs,
					sizeof(struct init_qm_pq_params),
					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(num_vports,
					   sizeof(struct init_qm_vport_params),
					   b_sleepable ? GFP_KERNEL
						       : GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
					  sizeof(struct init_qm_port_params),
					  b_sleepable ? GFP_KERNEL
						      : GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init rate limited queues */
	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
	}

	/* Then init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue++];

		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
		    p_hwfn->hw_info.personality == QED_PCI_ETH) {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.non_offload_tc;
			params->wrr_group = 1;
		} else {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.offload_tc;
			params->wrr_group = 1;
		}
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8) RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	if (init_rdma_offload_pq) {
		qm_info->offload_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_pure_ack_pq) {
		qm_info->pure_ack_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_ooo_pq) {
		qm_info->ooo_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		if (num_ports == 4)
			p_qm_port->active_phys_tcs = 0x7;
		else
			p_qm_port->active_phys_tcs = 0x9f;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;
	qm_info->pf_rl = pf_rl;
	qm_info->pf_wfq = pf_wfq;

	return 0;

alloc_err:
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
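/* Resulting PQ layout, as built by qed_init_qm_info() above: the PF
 * rate-limited (DCQCN) PQs come first, then the per-TC PQs, the pure-LB PQ,
 * the optional RoCE offload / iSCSI pure-ACK / OOO PQs, and finally one PQ
 * per VF starting at vf_queues_offset.
 */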
/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* qm_info is allocated in qed_init_qm_info() which is already called
	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
	 * The allocated size may change each init, so we free it before next
	 * allocation.
	 */
	qed_qm_info_free(p_hwfn);

	/* initialize qed's qm data structure */
	rc = qed_init_qm_info(p_hwfn, false);
	if (rc)
		return rc;

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}
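/* Note that qed_qm_reconf() passes b_sleepable == false to
 * qed_init_qm_info(), forcing GFP_ATOMIC allocations; it is presumably meant
 * to be callable from non-sleepable contexts such as the DCBx update flow,
 * unlike qed_resc_alloc() below which may sleep.
 */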
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_fcoe_info *p_fcoe_info;
	struct qed_ooo_info *p_ooo_info;
#ifdef CONFIG_QED_LL2
	struct qed_ll2_info *p_ll2_info;
#endif
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn, true);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       PROTOCOLID_ROCE,
							       NULL) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_ISCSI,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
		if (!p_eq)
			goto alloc_no_mem;
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq)
			goto alloc_no_mem;
		p_hwfn->p_consq = p_consq;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			p_ll2_info = qed_ll2_alloc(p_hwfn);
			if (!p_ll2_info)
				goto alloc_no_mem;
			p_hwfn->p_ll2_info = p_ll2_info;
		}
#endif

		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			p_fcoe_info = qed_fcoe_alloc(p_hwfn);
			if (!p_fcoe_info)
				goto alloc_no_mem;
			p_hwfn->p_fcoe_info = p_fcoe_info;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			p_iscsi_info = qed_iscsi_alloc(p_hwfn);
			if (!p_iscsi_info)
				goto alloc_no_mem;
			p_hwfn->p_iscsi_info = p_iscsi_info;
			p_ooo_info = qed_ooo_alloc(p_hwfn);
			if (!p_ooo_info)
				goto alloc_no_mem;
			p_hwfn->p_ooo_info = p_ooo_info;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2)
			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
		}
	}
}
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	if (QED_IS_BB_B0(p_hwfn->cdev)) {
		hw_mode |= 1 << MODE_BB;
	} else if (QED_IS_AH(p_hwfn->cdev)) {
		hw_mode |= 1 << MODE_K2;
	} else {
		DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
			  p_hwfn->cdev->type);
		return -EINVAL;
	}

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return -EINVAL;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);

	return 0;
}
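/* hw_mode is a bitmask consumed by the init tool via qed_init_run(): chip
 * type (BB/K2), ports per engine, multi-function mode, ASIC and 100G (CMT)
 * are each encoded as individual MODE_* bits.
 */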
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id, 0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
		}
	}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (QED_IS_BB(p_hwfn->cdev)) {
		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}
static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
	u32 dpi_bit_shift, dpi_count;
	u32 min_dpis;

	/* Calculate DPI size */
	dpi_page_size_1 = QED_WID_SIZE * n_cpus;
	dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
	dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
	dpi_page_size = roundup_pow_of_two(dpi_page_size);
	dpi_bit_shift = ilog2(dpi_page_size / 4096);

	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);

	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	if (dpi_count < min_dpis)
		return -EINVAL;

	return 0;
}

enum QED_ROCE_EDPM_MODE {
	QED_ROCE_EDPM_MODE_ENABLE = 0,
	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
	QED_ROCE_EDPM_MODE_DISABLE = 2,
};
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	int rc = 0;
	u8 cond;

	db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
	if (p_hwfn->cdev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions */
	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						   NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->cdev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return -EINVAL;
	}

	if (pwm_regsize < QED_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->cdev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize,
		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
		return -EINVAL;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
	    ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
		/* Either EDPM is mandatory, or we are attempting to allocate a
		 * WID per CPU.
		 */
		n_cpus = num_present_cpus();
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
		 */
		n_cpus = 1;
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

		if (cond)
			qed_rdma_dpm_bar(p_hwfn, p_ptt);
	}

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
		norm_regsize,
		pwm_regsize,
		p_hwfn->dpi_size,
		p_hwfn->dpi_count,
		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		"disabled" : "enabled");

	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
		return -EINVAL;
	}

	p_hwfn->dpi_start_offset = norm_regsize;

	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return 0;
}
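/* A worked example with made-up numbers: a 512kB doorbell BAR with a 16kB
 * normal (non-PWM) region leaves pwm_regsize = 496kB; with a 4kB DPI page
 * size that yields dpi_count = 124, which must still satisfy the configured
 * rdma min_dpis for the function to succeed.
 */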
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, int hw_mode)
{
	return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
			    p_hwfn->port_id, hw_mode);
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & BIT(MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & BIT(MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
			return rc;
		}
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
			qed_wr(p_hwfn, p_ptt,
			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
			       0x100);
		}
	}
	return rc;
}
static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}

static void
qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
			 struct qed_drv_load_params *p_drv_load)
{
	memset(p_load_req, 0, sizeof(*p_load_req));

	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
			       QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
	p_load_req->override_force_load = p_drv_load->override_force_load;
}
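/* The drv_role filled in above tells the MFW why this instance is loading:
 * QED_DRV_ROLE_KDUMP marks a crash (kdump) kernel load, which the MFW can
 * treat differently in its load arbitration, while QED_DRV_ROLE_OS is the
 * normal case.
 */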
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
	struct qed_load_req_params load_req_params;
	u32 load_code, param, drv_mb_param;
	bool b_default_mtu = true;
	struct qed_hwfn *p_hwfn;
	int rc = 0, mfw_rc, i;

	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
		if (rc)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
		if (rc)
			return rc;

		rc = qed_calc_hw_mode(p_hwfn);
		if (rc)
			return rc;

		qed_fill_load_req_params(&load_req_params,
					 p_params->p_drv_load_params);
		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_req_params);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
			return rc;
		}

		load_code = load_req_params.load_code;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Load code: 0x%x\n",
			   load_code);

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_params->p_tunn,
					    p_hwfn->hw_info.hw_mode,
					    p_params->b_hw_start,
					    p_params->int_mode,
					    p_params->allow_npar_tx_switch);
			break;
		default:
			DP_NOTICE(p_hwfn,
				  "Unexpected load code [0x%08x]", load_code);
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_SET_DCBX,
				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				     &load_code, &param);
		if (mfw_rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		drv_mb_param = STORM_FW_VERSION;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
				 drv_mb_param, &load_code, &param);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update firmware version\n");

		if (!b_default_mtu) {
			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
						   p_hwfn->hw_info.mtu);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to update default mtu\n");
		}

		rc = qed_mcp_ov_update_driver_state(p_hwfn,
						    p_hwfn->p_main_ptt,
						    QED_OV_DRIVER_STATE_DISABLED);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update driver state\n");

		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_ESWITCH_VEB);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
	}

	return 0;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
static void qed_hw_timers_stop(struct qed_dev *cdev,
			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}
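/* Stopping the timers therefore waits at most QED_HW_STOP_RETRY_LIMIT (10)
 * polls of 1-2ms each for the connection/task linear scans to complete
 * before logging a notice and proceeding anyway.
 */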
int qed_hw_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int rc, rc2 = 0;
	int j;

	for_each_hwfn(cdev, j) {
		p_hwfn = &cdev->hwfns[j];
		p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "qed_vf_pf_reset failed. rc = %d.\n",
					  rc);
				rc2 = -EINVAL;
			}
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		/* Send unload command to MCP */
		rc = qed_mcp_unload_req(p_hwfn, p_ptt);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
				  rc);
			rc2 = -EINVAL;
		}

		qed_slowpath_irq_sync(p_hwfn);

		/* After this point no MFW attentions are expected, e.g. prevent
		 * race between pf stop and dcbx pf update.
		 */
		rc = qed_sp_pf_stop(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
				  rc);
			rc2 = -EINVAL;
		}

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);

		rc = qed_mcp_unload_done(p_hwfn, p_ptt);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
				  rc);
			rc2 = -EINVAL;
		}
	}

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;

		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "qed_change_pci_hwfn failed. rc = %d.\n", rc);
			rc2 = -EINVAL;
		}
	}

	return rc2;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
	} else {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
	}

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
						  PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	struct qed_sb_cnt_info sb_cnt_info;
	u32 non_l2_sbs = 0;

	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		/* Each RoCE CNQ requires: 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 / RoCE but with
		 * consideration as to how many l2 queues / cnqs we have.
		 */
		feat_num[QED_RDMA_CNQ] =
			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));

		non_l2_sbs = feat_num[QED_RDMA_CNQ];
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
	    p_hwfn->hw_info.personality == QED_PCI_ETH) {
		/* Start by allocating VF queues, then PF's */
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		feat_num[QED_VF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_L2_QUEUE),
						sb_cnt_info.sb_iov_cnt);
		feat_num[QED_PF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_SB) -
						non_l2_sbs,
						RESC_NUM(p_hwfn,
							 QED_L2_QUEUE) -
						FEAT_NUM(p_hwfn,
							 QED_VF_L2_QUE));
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d\n",
		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
		   RESC_NUM(p_hwfn, QED_SB));
}
const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
	switch (res_id) {
	case QED_SB:
		return "SB";
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_QUEUE:
		return "LL2_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	case QED_BDQ:
		return "BDQ";
	default:
		return "UNKNOWN_RESOURCE";
	}
}
static int
__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_resources res_id,
			    u32 resc_max_val, u32 *p_mcp_resp)
{
	int rc;

	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
				      resc_max_val, p_mcp_resp);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for a max value setting of resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_INFO(p_hwfn,
			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
			res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);

	return 0;
}

static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
		case QED_LL2_QUEUE:
			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
			break;
		case QED_RDMA_CNQ_RAM:
			/* No need for a case for QED_CMDQS_CQS since
			 * CNQ/CMDQS are the same resource.
			 */
			resc_max_val = NUM_OF_CMDQS_CQS;
			break;
		case QED_RDMA_STATS_QUEUE:
			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
					    : RDMA_NUM_STATISTIC_COUNTERS_BB;
			break;
		case QED_BDQ:
			resc_max_val = BDQ_NUM_RESOURCES;
			break;
		default:
			continue;
		}

		rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
						 resc_max_val, &mcp_resp);
		if (rc)
			return rc;

		/* There's no point to continue to the next resource if the
		 * command is not supported by the MFW.
		 * We do continue if the command is supported but the resource
		 * is unknown to the MFW. Such a resource will be later
		 * configured with the default allocation values.
		 */
		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
			return -EINVAL;
	}

	return 0;
}
static int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
				enum qed_resources res_id,
				u32 *p_resc_num, u32 *p_resc_start)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	struct qed_sb_cnt_info sb_cnt_info;

	switch (res_id) {
	case QED_L2_QUEUE:
		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
		break;
	case QED_VPORT:
		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
			       MAX_NUM_VPORTS_BB) / num_funcs;
		break;
	case QED_RSS_ENG:
		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
		break;
	case QED_PQ:
		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
			       MAX_QM_TX_QUEUES_BB) / num_funcs;
		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
		break;
	case QED_RL:
		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
		break;
	case QED_LL2_QUEUE:
		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
		break;
	case QED_BDQ:
		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
		    p_hwfn->hw_info.personality != QED_PCI_FCOE)
			*p_resc_num = 0;
		else
			*p_resc_num = 1;
		break;
	case QED_SB:
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
		*p_resc_num = sb_cnt_info.sb_cnt;
		break;
	default:
		return -EINVAL;
	}

	switch (res_id) {
	case QED_BDQ:
		if (!*p_resc_num)
			*p_resc_start = 0;
		else if (p_hwfn->cdev->num_ports_in_engines == 4)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			*p_resc_start = p_hwfn->port_id + 2;
		break;
	default:
		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
		break;
	}

	return 0;
}
static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
				  enum qed_resources res_id)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0;
	u32 mcp_resp, *p_resc_num, *p_resc_start;
	int rc;

	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				  &dflt_resc_start);
	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				   &mcp_resp, p_resc_num, p_resc_start);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		DP_INFO(p_hwfn,
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
			res_id,
			qed_hw_get_resc_name(res_id),
			mcp_resp, dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
		goto out;
	}

	/* Special handling for status blocks; Would be revised in future */
	if (res_id == QED_SB) {
		*p_resc_num -= 1;
		*p_resc_start -= p_hwfn->enabled_func_idx;
	}
out:
	/* PQs have to divide by 8 [that's the HW granularity].
	 * Reduce number so it would fit.
	 */
	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num,
			(*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return 0;
}

static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
	int rc;
	u8 res_id;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		rc = __qed_hw_set_resc_info(p_hwfn, res_id);
		if (rc)
			return rc;
	}

	return 0;
}
#define QED_RESC_ALLOC_LOCK_RETRY_CNT		10
#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US	10000	/* 10 msec */

static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u8 res_id;
	int rc;

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	memset(&resc_lock_params, 0, sizeof(resc_lock_params));
	resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
	resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
	resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
	resc_lock_params.sleep_b4_retry = true;
	memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
	resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!rc && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	rc = qed_hw_set_resc_info(p_hwfn);
	if (rc)
		goto unlock_and_exit;

	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	qed_hw_set_feat(p_hwfn);

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
	return rc;
}
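/* The lock flow above degrades gracefully: -EINVAL from either
 * qed_mcp_resc_lock() or qed_hw_set_soft_resc_size() indicates an MFW that
 * predates these commands, in which case the max-value setting is skipped
 * and the driver falls back to querying (or defaulting) its resource ranges.
 */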
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just the offset, and not the offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

	link_temp = link->speed.advertised_speeds;
	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		__set_bit(QED_DEV_CAP_FCOE,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
	struct qed_dev *cdev = p_hwfn->cdev;

	num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_IS_BB(cdev)) {
			if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port_mode;

	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", port_mode);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}
}
static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port;
	int i;

	p_hwfn->cdev->num_ports_in_engines = 0;

	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
		port = qed_rd(p_hwfn, p_ptt,
			      CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
		if (port & 1)
			p_hwfn->cdev->num_ports_in_engines++;
	}

	if (!p_hwfn->cdev->num_ports_in_engines) {
		DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}
}
static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (QED_IS_BB(p_hwfn->cdev))
		qed_hw_info_port_num_bb(p_hwfn, p_ptt);
	else
		qed_hw_info_port_num_ah(p_hwfn, p_ptt);
}
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	int rc;

	/* Since all information is common, only the first hwfn should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	qed_hw_info_port_num(p_hwfn, p_ptt);

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
			    p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	if (qed_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	return qed_hw_get_resc(p_hwfn, p_ptt);
}
static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u16 device_id_mask;
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);

	/* Determine type */
	device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
	switch (device_id_mask) {
	case QED_DEV_ID_MASK_BB:
		cdev->type = QED_DEV_TYPE_BB;
		break;
	case QED_DEV_ID_MASK_AH:
		cdev->type = QED_DEV_TYPE_AH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
		return -EBUSY;
	}

	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		QED_IS_BB(cdev) ? "BB" : "AH",
		'A' + cdev->chip_rev,
		(int)cdev->chip_metal,
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		goto err0;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
	 * is called as it sets the ports number in an engine.
	 */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		goto err2;

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}
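
/* BAR-splitting arithmetic, illustrated: in CMT mode each PCI BAR is shared
 * evenly between the two hw-functions, so if qed_hw_bar_size() reports a
 * 512kB doorbell BAR, hwfn 1 starts at cdev->doorbells + 256kB. The sizes
 * here are only an example; the actual values are read from the PGLUE_B
 * BAR-size registers (or defaulted when the MFW left them unconfigured).
 */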
void qed_hw_remove(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	if (IS_PF(cdev))
		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct qed_chain_next *p_next;
	u32 size, i;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_chain->p_virt_addr, p_chain->p_phys_addr);
}
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_pbl_virt)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  pp_virt_addr_tbl[i],
				  *(dma_addr_t *)p_pbl_virt);

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	dma_free_coherent(&cdev->pdev->dev,
			  pbl_size,
			  p_chain->pbl_sp.p_virt_table,
			  p_chain->pbl_sp.p_phys_table);
out:
	vfree(p_chain->pbl.pp_virt_addr_tbl);
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, p_chain);
		break;
	}
}
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
	     chain_size > ((u32)U16_MAX + 1)) ||
	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
		DP_NOTICE(cdev,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  chain_size);
		return -EINVAL;
	}

	return 0;
}
static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void *p_virt = NULL, *p_virt_prev = NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		} else {
			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						     p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				     p_chain->p_virt_addr,
				     p_chain->p_phys_addr);

	return 0;
}
static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);

	return 0;
}
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	void **pp_virt_addr_tbl = NULL;
	u8 *p_pbl_virt = NULL;
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = vzalloc(size);
	if (!pp_virt_addr_tbl)
		return -ENOMEM;

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
					size, &p_pbl_phys, GFP_KERNEL);
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);
	if (!p_pbl_virt)
		return -ENOMEM;

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	return 0;
}
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
	u32 page_cnt;
	int rc = 0;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
			      mode, cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, p_chain);
		break;
	}
	if (rc)
		goto nomem;

	return 0;

nomem:
	qed_chain_free(cdev, p_chain);
	return rc;
}
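
/* A minimal usage sketch for the allocator above. The caller, ring size and
 * element size are invented for illustration; the enum values are the real
 * ones used throughout qed:
 *
 *	struct qed_chain tx_pbl;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     8192, 8, &tx_pbl);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_chain_free(cdev, &tx_pbl);
 */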
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}
int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}
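
/* Mapping example for the three helpers above (resource numbers invented
 * for illustration): if this PF was granted RESC_START = 16 and
 * RESC_NUM = 8 vports, a relative src_id of 3 maps to absolute id 19,
 * while src_id 8 is rejected with -EINVAL since only relative indices
 * 0-7 are valid for this PF.
 */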
static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
				  u8 *p_filter)
{
	*p_high = p_filter[1] | (p_filter[0] << 8);
	*p_low = p_filter[5] | (p_filter[4] << 8) |
		 (p_filter[3] << 16) | (p_filter[2] << 24);
}
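
/* Worked example: for the (made-up) MAC 00:11:22:33:44:55, p_filter[0..5]
 * is {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, so the helper produces
 *
 *	*p_high = 0x11 | (0x00 << 8)                               = 0x00000011
 *	*p_low  = 0x55 | (0x44 << 8) | (0x33 << 16) | (0x22 << 24) = 0x22334455
 *
 * i.e. the two upper MAC bytes land in the high word and the four lower
 * bytes in the low word of the filter-value register pair.
 */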
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "mac: %pM is added at %d\n",
		   p_filter, i);

	return 0;
}
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "mac: %pM is removed from %d\n",
			   p_filter, i);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}
int
qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u16 source_port_or_eth_type,
			    u16 dest_port, enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 1 << type);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "ETH type %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	}

	return 0;
}
void
qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 source_port_or_eth_type,
			       u16 dest_port,
			       enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return;
	}

	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
			continue;
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
			continue;
		if (!(qed_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
			     i * sizeof(u32)) & BIT(type)))
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_coal_timeset, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}
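
/* Timer-resolution example for the conversion above: a request of 200 usec
 * exceeds 0x7F but fits in 0xFF, so timer_res = 1 and
 * timeset = 200 >> 1 = 100; the hardware then coalesces for roughly
 * timeset << timer_res = 200 usec. Requests above 0x1FF are rejected.
 */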
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
					    min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
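
/* WFQ weight example (rates invented for illustration): with QED_WFQ_UNIT
 * of 100, a vport whose wfq_data min_speed is 2500 Mbps on a PF whose
 * min_pf_rate is 10000 Mbps gets vport_wfq = (2500 * 100) / 10000 = 25,
 * i.e. a 25% share of the PF minimum rate.
 */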
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
/* This function performs several validations for WFQ
 * configuration and the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ
 *    explicitly] rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed
 *    min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}
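
/* Distribution example for the validation above (all rates hypothetical):
 * min_pf_rate = 10000 Mbps with four vports, where vport 0 requests
 * 4000 Mbps. The remaining 6000 Mbps is split evenly across the three
 * non-requesting vports (2000 Mbps each); since every share stays above
 * one percent of min_pf_rate (100 Mbps), the request is accepted.
 */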
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (!rc)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre-configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
	}
}
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
int qed_device_num_engines(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? 2 : 1;
}