/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>

#include "qed.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
static const char version[] =
	"QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
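
/* As an illustration (version numbers hypothetical): with FW_MAJOR_VERSION 8,
 * FW_MINOR_VERSION 4, FW_REVISION_VERSION 2 and FW_ENGINEERING_VERSION 0, the
 * macros above expand to "qed/qed_init_values_zipped-8.4.2.0.bin", which is
 * the path request_firmware() resolves relative to /lib/firmware.
 */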

static int __init qed_init(void)
{
	pr_notice("qed_init called\n");

	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev,
			struct pci_dev *pdev)
{
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (cdev->pci_params.pm_cap == 0)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
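
/* qed_init_pci() maps two BARs: BAR0 is the register window, mapped as
 * ordinary uncached MMIO via pci_ioremap_bar(), while BAR2 is the doorbell
 * space, mapped write-combined with ioremap_wc() so consecutive doorbell
 * writes may be merged by the CPU.
 */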

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	dev_info->fw_major = FW_MAJOR_VERSION;
	dev_info->fw_minor = FW_MINOR_VERSION;
	dev_info->fw_rev = FW_REVISION_VERSION;
	dev_info->fw_eng = FW_ENGINEERING_VERSION;
	dev_info->mf_mode = cdev->mf_mode;

	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);

	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (ptt) {
		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
				       &dev_info->flash_size);

		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
	}

	return 0;
}
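
/* A minimal usage sketch for a protocol driver (caller and field use are
 * illustrative, not mandated by this file):
 *
 *	struct qed_dev_info info;
 *
 *	if (!qed_fill_dev_info(cdev, &info))
 *		pr_info("qed: FW %d.%d.%d.%d on %pM\n", info.fw_major,
 *			info.fw_minor, info.fw_rev, info.fw_eng, info.hw_mac);
 */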

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree(cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
			       pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 enum qed_protocol protocol,
				 u32 dp_module, u8 dp_level)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = protocol;

	qed_init_dp(cdev, dp_module, dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev,
					   int_params->msix_table, cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		rc = pci_enable_msi(cdev->pdev);
		if (!rc) {
			int_params->out.int_mode = QED_INT_MODE_MSI;
			goto out;
		}

		DP_NOTICE(cdev, "Failed to enable MSI\n");
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
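
/* Unless force_mode is set, qed_set_int_mode() degrades gracefully through
 * the switch fallthroughs: MSI-X first, then MSI, and finally legacy INTA,
 * which can always be "enabled" and therefore never fails.
 */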

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
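
/* Global SIMD handler indices interleave across engines: index % num_hwfns
 * picks the hwfn and index / num_hwfns the slot within it. With two hwfns,
 * for example, index 5 configures slot 2 of hwfn 1, while index 4 configures
 * slot 2 of hwfn 0.
 */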

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		/* If status was non-zero, needs to be cleaned */
		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	int rc = 0;
	u8 id;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
		if (!rc)
			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
				   "Requested slowpath MSI-X\n");
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int rc, i;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc;

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	int rc, i;
	u8 num_vectors = 0;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));

	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i)
		num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
	cdev->int_params.in.num_vectors = num_vectors;

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	return 0;
}
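
/* Vector budget example: two hwfns with eight status blocks each request
 * 2 * (8 + 1) = 18 vectors. Vectors 0-1 (one per hwfn) serve the slowpath,
 * so fp_msix_base is 2 and, if all 18 are granted, fp_msix_cnt is 16.
 */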

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
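
/* qed_unzip_data() returns the decompressed size in 32-bit dwords
 * (total_out / 4), not bytes, and returns 0 on any zlib failure, so callers
 * can treat a zero return as "nothing usable was produced".
 */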

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	int rc;

	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
			      &cdev->pdev->dev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to find fw file - /lib/firmware/%s\n",
			  QED_FW_FILE_NAME);
		goto err;
	}

	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	rc = qed_slowpath_setup_int(cdev, params->int_mode);
	if (rc)
		goto err1;

	/* Allocate stream for unzipping */
	rc = qed_alloc_stream_mem(cdev);
	if (rc) {
		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
		goto err2;
	}

	/* Start the slowpath */
	data = cdev->firmware->data;

	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	hwfn = QED_LEADING_HWFN(cdev);
	drv_version.version = (params->drv_major << 24) |
			      (params->drv_minor << 16) |
			      (params->drv_rev << 8) |
			      (params->drv_eng);
	strlcpy(drv_version.name, params->name,
		MCP_DRV_VER_STR_SIZE - 4);
	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
				      &drv_version);
	if (rc) {
		DP_NOTICE(cdev, "Failed sending drv version command\n");
		return rc;
	}

	return 0;

err2:
	qed_free_stream_mem(cdev);
	qed_slowpath_irq_free(cdev);
err1:
	qed_disable_msix(cdev);
	qed_resc_free(cdev);
err:
	release_firmware(cdev->firmware);

	return rc;
}
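
/* drv_version.version packs one byte per field with the major version in the
 * top byte: e.g. (hypothetical numbers) major 8, minor 4, rev 2, eng 0 gives
 * 0x08040200. The name string is clamped to MCP_DRV_VER_STR_SIZE - 4 bytes.
 */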

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_free_stream_mem(cdev);

	qed_nic_stop(cdev);
	qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}
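
/* Status blocks are striped across engines the same way as SIMD handlers:
 * with two hwfns, sb_id 5 becomes relative SB 2 on hwfn 1. Non-L2 users
 * (RoCE, storage) keep n_hwfns = 1, so every SB lands on engine 0.
 */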

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static int qed_set_link(struct qed_dev *cdev,
			struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
		    (params->adv_speeds & SUPPORTED_1000baseT_Full))
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		/* Ethtool defines no SUPPORTED_* bits for 50G/100G here, so
		 * the masks below are intentionally dead (x & 0) until such
		 * bits exist.
		 */
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_NONE;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
	       sizeof(link_caps));

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg)
		if_link->supported_caps |= SUPPORTED_Autoneg;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= SUPPORTED_Asym_Pause;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= SUPPORTED_Pause;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
					    SUPPORTED_1000baseT_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
	/* No SUPPORTED_* bits exist for 50G/100G, hence the |= 0 no-ops in
	 * the 50G/100G cases here and below.
	 */
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= 0;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->advertised_caps |= 0;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= SUPPORTED_1000baseT_Half |
					   SUPPORTED_1000baseT_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= 0;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->supported_caps |= 0;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Half;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= 0;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= 0;

	if (link.an_complete)
		if_link->lp_caps |= SUPPORTED_Autoneg;

	if (link.partner_adv_pause)
		if_link->lp_caps |= SUPPORTED_Pause;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= SUPPORTED_Asym_Pause;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	qed_fill_link(&cdev->hwfns[0], if_link);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

const struct qed_common_ops qed_common_ops_pass = {
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.set_led = &qed_set_led,
};

u32 qed_get_protocol_version(enum qed_protocol protocol)
{
	switch (protocol) {
	case QED_PROTOCOL_ETH:
		return QED_ETH_INTERFACE_VERSION;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(qed_get_protocol_version);