/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
63 #include "fpga/core.h"
64 #include "fpga/ipsec.h"
65 #include "accel/ipsec.h"
66 #include "accel/tls.h"
67 #include "lib/clock.h"
68 #include "lib/vxlan.h"
69 #include "lib/geneve.h"
70 #include "lib/devcom.h"
71 #include "lib/pci_vsc.h"
72 #include "diag/fw_tracer.h"
74 #include "lib/hv_vhca.h"
75 #include "diag/rsc_dump.h"
76 #include "sf/vhca_event.h"
77 #include "sf/dev/dev.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static u32 sw_owner_id[4];

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		/* remaining profile 1 fields elided */
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		/* remaining profile 2 fields and MR cache entries elided */
	},
};

#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	120000
#define FW_INIT_WARN_MESSAGE_INTERVAL	20000

static int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

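/* Poll the initializing bit in the initialization segment until firmware
 * clears it or max_wait_mili expires, warning every warn_time_mili while
 * the wait is still in progress.
 */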
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
				       jiffies_to_msecs(end - warn) / 1000);
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}

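/* Report the running driver as "Linux,<module>,<kernel version>" to firmware
 * through the SET_DRIVER_VERSION command when the HCA advertises support.
 */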
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, KBUILD_MODNAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));

	snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
		 LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
		 LINUX_VERSION_SUBLEVEL);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec_in(dev, set_driver_version, in);
}

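/* Try a 64-bit streaming and coherent DMA mask first, fall back to 32 bits,
 * and cap the maximum DMA segment size at 2GB.
 */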
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

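/* Layout of the HOST_ENDIANNESS access register that set_hca_ctrl() writes to
 * tell firmware which endianness the host requester uses.
 */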
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	/* cases mapping the supported pkey table sizes (128..4096) to their
	 * firmware encoding are elided here
	 */
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca[cap_type]->max, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}

static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
	       MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field)                                            \
	do {                                                                   \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field);                       \
		if (_res) {                                                    \
			do_set = true;                                         \
			MLX5_SET(odp_cap, set_hca_cap, field, _res);           \
		}                                                              \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}

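/* Copy the current general HCA capabilities into the SET_HCA_CAP mailbox and
 * adjust the fields the driver wants to override (pkey table size, log_max_qp,
 * cmdif checksum, 4K UARs, cache line size and related settings) before
 * applying them.
 */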
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = &dev->profile;
	void *set_hca_cap;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       prof->log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}

static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (!MLX5_CAP_GEN(dev, roce))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
	if (err)
		return err;

	if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
	    !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
	       MLX5_ST_SZ_BYTES(roce_cap));
	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

	err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
	return err;
}

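/* Allocate one SET_HCA_CAP mailbox and reuse it (clearing it in between) for
 * the general, atomic, ODP and RoCE capability groups.
 */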
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_ctx;
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = handle_hca_cap(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_atomic(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_odp(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_roce(dev, set_ctx);
	if (err)
		mlx5_core_err(dev, "handle_hca_cap_roce failed\n");

out:
	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}

int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, disable_hca, in);
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

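/* One-time software state initialization: devcom registration, IRQ/EQ tables,
 * events, clock, steering-related tables, SR-IOV, eswitch, FPGA, SF support
 * and the diagnostic objects. Mirrored by mlx5_cleanup_once().
 */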
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_irq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize irq table\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_irq_cleanup;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_fw_reset_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize fw reset events\n");
		goto err_events_cleanup;
	}

	mlx5_cq_debugfs_init(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);
	dev->geneve = mlx5_geneve_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_sriov_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_vhca_event_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
		goto err_fpga_cleanup;
	}

	err = mlx5_sf_hw_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
		goto err_sf_hw_table_cleanup;
	}

	err = mlx5_sf_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF table %d\n", err);
		goto err_sf_table_cleanup;
	}

	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);

	dev->tracer = mlx5_fw_tracer_create(dev);
	dev->hv_vhca = mlx5_hv_vhca_create(dev);
	dev->rsc_dump = mlx5_rsc_dump_create(dev);

	return 0;

err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
	mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
	mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
	mlx5_irq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}

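/* Bring the HCA function itself up: wait for firmware, initialize the command
 * interface, enable the HCA, negotiate ISSI, feed boot/init pages, program
 * capabilities and start health polling. The boot flag distinguishes the
 * first probe from a later reload.
 */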
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			      FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
			      FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}

static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}

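/* Start the run-time services on top of an initialized function: UAR page,
 * events, page allocator, IRQs/EQs, FW tracer, resource dump, FPGA/accel,
 * flow steering, SF hardware tables, embedded CPU and SR-IOV. Undone in
 * reverse order by mlx5_unload().
 */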
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_irq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc IRQs\n");
		goto err_irq_table;
	}

	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer\n");
		goto err_fw_tracer;
	}

	mlx5_fw_reset_events_start(dev);
	mlx5_hv_vhca_init(dev->hv_vhca);

	err = mlx5_rsc_dump_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init Resource dump\n");
		goto err_rsc_dump;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	mlx5_accel_ipsec_init(dev);

	err = mlx5_accel_tls_init(dev);
	if (err) {
		mlx5_core_err(dev, "TLS device start failed %d\n", err);
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);

	err = mlx5_sf_hw_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "sf table create failed %d\n", err);
		goto err_vhca;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	mlx5_lag_add_mdev(dev);
	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	mlx5_sf_dev_table_create(dev);

	return 0;

err_sriov:
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_rsc_dump_cleanup(dev);
err_rsc_dump:
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_irq_table_destroy(dev);
err_irq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	return err;
}

static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_lag_remove_mdev(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}

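/* Device lifecycle entry points: mlx5_init_one()/mlx5_uninit_one() cover the
 * first probe and final removal (including the one-time software state),
 * while mlx5_load_one()/mlx5_unload_one() are the lighter reload paths used
 * for error recovery, suspend/resume and devlink reload.
 */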
int mlx5_init_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, true);
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev));
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
}

int mlx5_load_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, false);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}

static const int types[] = {
	/* leading capability types elided */
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	/* intermediate capability types elided */
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	/* intermediate capability types elided */
	MLX5_CAP_VECTOR_CALC,
	/* intermediate capability types elided */
	MLX5_CAP_VDPA_EMULATION,
	/* trailing capability types elided */
};

static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
{
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		type = types[i];
		kfree(dev->caps.hca[type]);
	}
}

static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_hca_cap *cap;
	int type;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
		if (!cap)
			goto err;
		type = types[i];
		dev->caps.hca[type] = cap;
	}

	return 0;

err:
	mlx5_hca_caps_free(dev);
	return -ENOMEM;
}

int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
					    mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	err = mlx5_hca_caps_alloc(dev);
	if (err)
		goto err_hca_caps;

	return 0;

err_hca_caps:
	mlx5_adev_cleanup(dev);
err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	debugfs_remove(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	return err;
}

void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_hca_caps_free(dev);
	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
}

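/* PCI probe: allocate the devlink/mlx5_core_dev pair, set up software state
 * and PCI resources, then bring the device up through mlx5_init_one() and
 * enable crdump and devlink reload.
 */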
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc(&pdev->dev);
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	pci_save_state(pdev);
	if (!mlx5_core_is_mp_slave(dev))
		devlink_reload_enable(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_reload_disable(devlink);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				mlx5_core_info(dev,
					       "wait vital counter value 0x%x after %d iterations\n",
					       count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev);
	if (err)
		mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
			      __func__, err);
	else
		mlx5_core_info(dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

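/* On shutdown, try the fast/force teardown firmware commands instead of a
 * full unload so that kexec and reboot do not wait for the regular teardown.
 */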
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms requiring freeing the IRQ's in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to cleanup the mlx5_core software
	 * contexts.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}

static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unload_one(dev);

	return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return mlx5_load_one(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
{
	int ret = -EIO;

	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		ret = mlx5_load_one(dev);

	return ret;
}

static struct pci_driver mlx5_core_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mlx5_core_pci_table,
	.probe		= probe_one,
	.remove		= remove_one,
	.suspend	= mlx5_suspend,
	.resume		= mlx5_resume,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};

static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}

static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

	/* assumed for this kernel generation: the mlx5e ethernet init step */
	err = mlx5e_init();
	if (err)
		goto err_en;

	return 0;

err_en:
	mlx5_sf_driver_unregister();
err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}

static void __exit cleanup(void)
{
	/* assumed for this kernel generation: the mlx5e ethernet cleanup step */
	mlx5e_cleanup();
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);