/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF	2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static u32 sw_owner_id[4];
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
	},
};
#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	10000
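
/* Poll the initializing bit in the initialization segment until firmware
 * clears it or the timeout (in milliseconds) expires.
 */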
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_NAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_VERSION, remaining_size);

	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
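
/* Prefer a 64-bit DMA mask and fall back to 32 bits if the platform cannot
 * provide it, for both streaming and consistent mappings, then cap the
 * maximum DMA segment size at 2GB.
 */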
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
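
/* PCI device enable/disable are serialized by pci_status_mutex and track
 * dev->pci_status, so repeated calls from the probe and error-recovery paths
 * only toggle the device once.
 */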
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
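
/* Translate a pkey table size in entries to the encoding the firmware
 * expects in the pkey_table_size HCA capability field.
 */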
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
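
/* Query one HCA capability group. The op_mod encodes both the capability
 * type and whether the current or maximum values are requested; the result
 * is cached in dev->caps.hca_cur[] or dev->caps.hca_max[] accordingly.
 */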
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
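
/* If the device exposes atomic capabilities and reports host-endianness
 * support for 8-byte atomic requests, switch the requester endianness mode
 * from the big-endian default to host endianness via SET_HCA_CAP.
 */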
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_hca_cap;
	void *set_ctx;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
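
/* Start from the current ODP capabilities and raise every field for which
 * the firmware advertises a non-zero maximum, so on-demand paging support
 * is enabled wherever the device allows it.
 */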
static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
{
	void *set_hca_cap;
	void *set_ctx;
	int set_sz;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
	       MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field)                                            \
	do {                                                                   \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field);                       \
		if (_res) {                                                    \
			do_set = true;                                         \
			MLX5_SET(odp_cap, set_hca_cap, field, _res);           \
		}                                                              \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);

	if (do_set)
		err = set_caps(dev, set_ctx, set_sz,
			       MLX5_SET_HCA_CAP_OP_MOD_ODP);

	kfree(set_ctx);

	return err;
}
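
/* Adjust the general device capabilities before INIT_HCA: limit the pkey
 * table to 128 entries, clamp the profile's log_max_qp to the HCA maximum,
 * disable cmdif checksums, and opt into 4K UARs, 128-byte cache line
 * reporting, DCT and multiple vhca ports where the firmware supports them.
 */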
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       profile[prof_sel].log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int err;

	err = handle_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	err = handle_hca_cap_odp(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

out:
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
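
/* Read the 64-bit internal timer as high/low/high: if the high word changed
 * while reading, the low word wrapped and is re-read so the returned value
 * is consistent. PTP system timestamps bracket the low-word reads.
 */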
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
			     struct ptp_system_timestamp *sts)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(&dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return (u64)timer_l | (u64)timer_h1 << 32;
}
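
/* Negotiate the ISSI (interface step sequence ID) with the firmware: query
 * the supported mask, prefer ISSI 1 when available, and fall back to ISSI 0
 * on older firmware that does not support the query.
 */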
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);
	priv->numa_node = dev_to_node(&dev->pdev->dev);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
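
/* One-time software state initialization, done on the boot path only and
 * undone by mlx5_cleanup_once(): devcom registration, EQ/event/CQ-debugfs
 * infrastructure, resource tables, clock, vxlan, rate limiting, MPFS,
 * eswitch, SR-IOV, FPGA and the FW tracer.
 */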
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_devcom;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_cq_debugfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize cq debugfs\n");
		goto err_events_cleanup;
	}

	mlx5_init_qp_table(dev);

	mlx5_init_mkey_table(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_sriov_cleanup;
	}

	dev->tracer = mlx5_fw_tracer_create(dev);

	return 0;

err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_fpga_cleanup(dev);
	mlx5_sriov_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}
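
/* Bring the function to the point where the full driver stack can be
 * loaded: wait for firmware, initialize the command interface, enable the
 * HCA, negotiate ISSI, hand boot/init pages to firmware, program host
 * endianness and HCA capabilities, run INIT_HCA and start health polling.
 */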
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			      FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
			      FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

	return err;
}
static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);

	return 0;
}
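
/* Load the runtime resources on top of an initialized function: UAR page,
 * EQs, FW tracer, FPGA, IPSec/TLS acceleration, flow steering, HCA
 * defaults, SR-IOV and the embedded CPU. Unwound in reverse order by
 * mlx5_unload() and by the error path below.
 */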
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer\n");
		goto err_fw_tracer;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	err = mlx5_accel_ipsec_init(dev);
	if (err) {
		mlx5_core_err(dev, "IPSec device start failed %d\n", err);
		goto err_ipsec_start;
	}

	err = mlx5_accel_tls_init(dev);
	if (err) {
		mlx5_core_err(dev, "TLS device start failed %d\n", err);
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_fs;
	}

	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	return 0;

err_ec:
	mlx5_sriov_detach(dev);
err_sriov:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:
	mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);

	return err;
}
static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_ec_cleanup(dev);
	mlx5_sriov_detach(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}
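
/* Full bring-up, serialized by intf_state_mutex. The boot flag selects
 * whether the one-time software state (mlx5_init_once) is created on this
 * pass or was already set up by a previous load.
 */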
static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
	int err = 0;

	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, boot);
	if (err)
		goto out;

	if (boot) {
		err = mlx5_init_once(dev);
		if (err) {
			mlx5_core_err(dev, "sw objs init failed\n");
			goto function_teardown;
		}
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
	} else {
		err = mlx5_register_device(dev);
		if (err) {
			mlx5_core_err(dev, "register device failed %d\n", err);
			goto err_reg_dev;
		}
	}

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
	mutex_unlock(&dev->intf_state_mutex);

	return err;

err_reg_dev:
	mlx5_unload(dev);
err_load:
	if (boot)
		mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, boot);
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	if (mlx5_device_registered(dev))
		mlx5_detach_device(dev);

	mlx5_unload(dev);

	if (cleanup)
		mlx5_cleanup_once(dev);

	mlx5_function_teardown(dev, cleanup);
out:
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
};
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	dev->profile = &profile[profile_idx];

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root);
	if (!priv->dbg_root) {
		pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name);
		return -ENOMEM;
	}

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	return 0;

err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	debugfs_remove(dev->priv.dbg_root);

	return err;
}
static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
}
#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
	if (!devlink) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->pdev = pdev;

	err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev));
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_load_one(dev, true);
	if (err) {
		mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
			      err);
		goto err_load_one;
	}

	request_module_nowait(MLX5_IB_MOD);

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto clean_load;

	pci_save_state(pdev);
	return 0;

clean_load:
	mlx5_unload_one(dev, true);
err_load_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	devlink_free(devlink);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_unregister(devlink);
	mlx5_unregister_device(dev);

	if (mlx5_unload_one(dev, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_flush(dev);
		return;
	}

	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, false);
	/* In case of kernel call drain the health wq */
	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				mlx5_core_info(dev,
					       "wait vital counter value 0x%x after %d iterations\n",
					       count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, false);
	if (err)
		mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
			      __func__, err);
	else
		mlx5_core_info(dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
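
/* On shutdown, try to tear down the HCA with the fast (or, failing that,
 * forced) teardown command so firmware stops all PCI traffic without a full
 * unload; fall back to the regular unload path when neither is supported.
 */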
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms require freeing the IRQs in the shutdown flow. If
	 * they aren't freed they can't be allocated after kexec. There is no
	 * need to clean up the mlx5_core software state.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, false);
	mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
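
/* Helpers used by the health recovery flow: they funnel through the same
 * PCI error-handler callbacks as AER-driven recovery.
 */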
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}
static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
};
static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}
static int __init init(void)
{
	int err;

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);