// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}

static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};

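/* The __initconst bit arrays above are folded into the linkmode masks of
 * these maps during module init; the source arrays are then dropped, which
 * is presumably why the maps themselves are marked __ro_after_init.
 */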
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

struct qed_devlink {
	struct qed_dev *cdev;
};

enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

static int qed_dl_param_get(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	ctx->val.vbool = cdev->iwarp_cmt;

	return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	cdev->iwarp_cmt = ctx->val.vbool;

	return 0;
}

static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;

static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
	if (!cdev->dl)
		return;

	devlink_params_unregister(cdev->dl, qed_devlink_params,
				  ARRAY_SIZE(qed_devlink_params));

	devlink_unregister(cdev->dl);
	devlink_free(cdev->dl);
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_devlink_register(cdev);
	if (rc) {
		DP_INFO(cdev, "Failed to register devlink.\n");
		goto err2;
	}

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

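/* Negotiate an MSI-X vector count with the PCI core. If fewer vectors than
 * requested are granted, retry with the largest multiple of the hwfn count
 * that still satisfies the configured minimum, so each hwfn gets an equal
 * share of fastpath vectors.
 */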
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

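/* Fastpath "SIMD" handlers are spread across engines on CMT devices:
 * index % num_hwfns selects the hwfn and index / num_hwfns the slot
 * within that hwfn's handler table.
 */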
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

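/* In MSI/INTA mode a single ISR serves the whole device: bit 0 of the SISR
 * status register is the slowpath interrupt, while the following 64 bits map
 * one-to-one onto the fastpath handlers registered above.
 */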
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

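/* Size the interrupt budget: one slowpath vector plus one vector per status
 * block for each hwfn. Whatever exceeds the L2 queue needs is handed off to
 * RDMA further below.
 */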
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

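/* The firmware blob shipped as QED_FW_FILE_NAME is zlib-deflated; this
 * helper inflates it into the caller's buffer and returns the output size
 * in dwords (0 on failure).
 */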
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

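/* Periodic doorbell recovery: once qed_periodic_db_rec_start() arms it, the
 * slowpath task re-queues itself QED_PERIODIC_DB_REC_COUNT times at 100ms
 * intervals, running qed_db_rec_handler() on each pass.
 */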
#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

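/* Main slowpath bring-up: IOV and slowpath workqueues, firmware request, NIC
 * resources, interrupts, zlib stream, HW init, optional LL2 and finally
 * reporting the driver version to the MFW. The error labels unwind in the
 * reverse order of setup.
 */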
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

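/* Status blocks for L2 queues are interleaved across hwfns on CMT devices,
 * hence the sb_id % num_hwfns / sb_id / num_hwfns split below; storage and
 * RoCE use only the affined hwfn.
 */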
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

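/* Translate the generic qed_link_params into the extended-speed (and
 * extended-FEC) representation understood by newer management firmware.
 */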
static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int rc;
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

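/* Convert an MFW speed capability mask into ethtool link modes, refining
 * the result by media type, transceiver type and board configuration as
 * reported by the management firmware.
 */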
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
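
/* Illustrative sketch (not part of this driver): a protocol driver such
 * as qede would typically translate the qed_link_output filled above into
 * an ethtool get_link_ksettings() reply roughly like this. The helper
 * name is hypothetical; only fields populated by qed_fill_link() are
 * consumed.
 */
static void __maybe_unused
qed_example_link_to_ksettings(const struct qed_link_output *if_link,
			      struct ethtool_link_ksettings *cmd)
{
	linkmode_copy(cmd->link_modes.supported, if_link->supported_caps);
	linkmode_copy(cmd->link_modes.advertising, if_link->advertised_caps);
	linkmode_copy(cmd->link_modes.lp_advertising, if_link->lp_caps);

	cmd->base.speed = if_link->link_up ? if_link->speed : SPEED_UNKNOWN;
	cmd->base.duplex = if_link->link_up ? if_link->duplex : DUPLEX_UNKNOWN;
	cmd->base.port = if_link->port;
	cmd->base.autoneg = if_link->autoneg ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE;
}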
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}
void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}
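
/* A hedged, self-contained sketch (hypothetical helper, unused by the
 * driver) of the CRC convention implemented above: swap the image to
 * big-endian dwords in place, run the standard crc32() with inverted
 * init/final over all but the trailing four bytes, store big-endian,
 * and compare against the stored trailer. Note it modifies @buf.
 */
static bool __maybe_unused qed_example_nvm_crc_ok(u8 *buf, u32 len)
{
	u32 crc;

	if (len < 8 || len % 4)
		return false;

	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(len - 4, 4));
	crc = ~crc32(~0U, buf, len - 4);
	crc = (__force u32)cpu_to_be32p(&crc);

	/* The last four bytes of the image hold the expected CRC */
	return !memcmp(buf + len - 4, &crc, 4);
}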
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type     | Options        |  Number of register settings   |
 * 8B  |                       Value                                       |
 * 12B |                       Mask                                        |
 * 16B |                       Offset                                      |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}
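
/* Worked example (illustrative values, hypothetical image id): a single
 * 0x4 "nvm change" command as consumed by the parser above - 4B command
 * index, image_type (1B), options (1B, bit0 = recompute CRC), number of
 * value-mask-offset sets (2B), then one set of three little-endian dwords.
 */
static const u8 qed_example_nvm_change_cmd[] __maybe_unused = {
	0x04, 0x00, 0x00, 0x00,	/* command index: 0x4 (nvm change) */
	0x1d,			/* image_type (hypothetical) */
	0x00,			/* options: bit0 clear - don't update CRC */
	0x01, 0x00,		/* one value-mask-offset set follows */
	0x78, 0x56, 0x34, 0x12,	/* value */
	0xff, 0xff, 0x00, 0x00,	/* mask - only the low 16 bits are changed */
	0x10, 0x00, 0x00, 0x00,	/* offset within the nvm image */
};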
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                         |
 * 4B  | b'0: check_response?   | b'1-31  reserved                         |
 * 8B  | File-type |                   reserved                            |
 * 12B |                    Image length in bytes                          |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                         |
 * 4B  |                       Length in bytes                             |
 * 8B  | b'0: check_response?   | b'1-31  reserved                         |
 * 12B |                       Offset in bytes                             |
 * 16B |                       Data ...                                    |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                           |
 * 4B  |                       Length in bytes                             |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *(u16 *)(*data));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
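
/* Worked example of the general header validated above (all fields
 * little-endian; values illustrative). The real signature constant,
 * QED_NVM_SIGNATURE, is defined elsewhere in the driver and is left
 * zeroed here rather than guessed.
 */
static const u8 qed_example_nvm_hdr[12] __maybe_unused = {
	0x00, 0x00, 0x00, 0x00,	/* QED_NVM_SIGNATURE placeholder */
	0x00, 0x01, 0x00, 0x00,	/* total file length: 0x100 bytes */
	0x05, 0x00, 0x00, 0x00,	/* highest command index used | reserved */
};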
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                         |
 * 4B  | Number of config attributes     |          Reserved               |
 * 4B  | Config ID                       | Entity ID      | Length         |
 * 4B  | Value                                                             |
 *     |                                                                   |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}
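
/* Worked example (illustrative values): one cfg_id-entity_id-Length-Value
 * record as consumed by the loop above - cfg_id (2B), entity id (1B),
 * value length (1B), then the raw value bytes.
 */
static const u8 qed_example_cfg_attr[] __maybe_unused = {
	0x42, 0x00,		/* cfg_id (hypothetical) */
	0x00,			/* entity_id: 0 - global, no ENTITY_SEL flag */
	0x04,			/* value length: 4 bytes */
	0x01, 0x00, 0x00, 0x00,	/* value */
};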
#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
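
/* qed_nvm_flash() is handed to the protocol driver through the .nvm_flash
 * member of qed_common_ops_pass below; qede, for example, reaches it from
 * its ethtool flash_device handler (ethtool -f). A minimal caller sketch
 * (hypothetical wrapper and file name): the batchfile is fetched through
 * request_firmware(), so it must sit in the firmware search path and start
 * with a valid general header.
 */
static int __maybe_unused qed_example_flash_batchfile(struct qed_dev *cdev)
{
	return qed_nvm_flash(cdev, "qed/nvm_batch.bin");
}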
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}
static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN] = "HW Attention",
	[QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT] = "FW Assertion",
	[QED_HW_ERR_LAST] = "Unknown",
};
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;

	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	/* Make the flag update visible before the work is queued */
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi