/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");
#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)
static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION "\n";

static const struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static const struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 11,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 13,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* The reserved_uars is calculated by system page size unit.
	 * Therefore, adjustment is added when the uar page size is less
	 * than the system page size
	 */
	dev->caps.reserved_uars	=
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
		      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};
static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port]	    = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port]	    = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port]  = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port]    = port_cap->eth_mtu;
	dev->caps.max_tc_eth	       = port_cap->max_tc_eth;
	dev->caps.def_mac[port]        = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port]	    = port_cap->trans_type;
	dev->caps.vendor_oui[port]     = port_cap->vendor_oui;
	dev->caps.wavelength[port]     = port_cap->wavelength;
	dev->caps.trans_code[port]     = port_cap->trans_code;

	return 0;
}
static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}
static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}
#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
	dev->caps.wol_port[1]	     = dev_cap->wol_port[1];
	dev->caps.wol_port[2]	     = dev_cap->wol_port[2];

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* Virtual PCI function needs to determine UAR page size from
		 * firmware. Only master PCI function can set the uar page size
		 */
		if (enable_4k_uar || !dev->persist->num_vfs)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parse the packet and verify
		 * phv bit was reported correctly in the wqe. To allow QinQ
		 * PHV_EN flag should be set and phv_check_en must be cleared
		 * otherwise QinQ packets will be drop by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs  = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}
static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
}
/* Check whether there are live VFs and return how many there are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
				       struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
	}

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}
static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
{
	kfree(dev->caps.spec_qps);
	dev->caps.spec_qps = NULL;
}
static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
{
	struct mlx4_func_cap *func_cap = NULL;
	struct mlx4_caps *caps = &dev->caps;
	int i, err = 0;

	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
	caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL);

	if (!func_cap || !caps->spec_qps) {
		mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= caps->num_ports; i++) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		caps->spec_qps[i - 1] = func_cap->spec_qps;
		caps->port_mask[i] = caps->port_type[i];
		caps->phys_port_id[i] = func_cap->phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &caps->gid_table_len[i],
						      &caps->pkey_table_len[i]);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
	}

err_mem:
	if (err)
		mlx4_slave_destroy_special_qp_cap(dev);
	kfree(func_cap);
	return err;
}
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap *dev_cap = NULL;
	struct mlx4_func_cap *func_cap = NULL;
	struct mlx4_init_hca_param *hca_param = NULL;

	hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
	func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
	dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
	if (!hca_param || !func_cap || !dev_cap) {
		mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
		err = -ENOMEM;
		goto free_mem;
	}

	err = mlx4_QUERY_HCA(dev, hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		goto free_mem;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param->global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		err = -EINVAL;
		goto free_mem;
	}

	dev->caps.hca_core_clock = hca_param->hca_core_clock;

	dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
	err = mlx4_dev_cap(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		goto free_mem;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		err = -ENODEV;
		goto free_mem;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param->uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		err = -ENODEV;
		goto free_mem;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, dev_cap);

	/* Although uar page size in FW differs from system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still works with assumption that uar page size == system page size
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		goto free_mem;
	}

	if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap->pf_context_behaviour,
			 PF_CONTEXT_BEHAVIOUR_MASK);
		err = -EINVAL;
		goto free_mem;
	}

	dev->caps.num_ports		= func_cap->num_ports;
	dev->quotas.qp			= func_cap->qp_quota;
	dev->quotas.srq			= func_cap->srq_quota;
	dev->quotas.cq			= func_cap->cq_quota;
	dev->quotas.mpt			= func_cap->mpt_quota;
	dev->quotas.mtt			= func_cap->mtt_quota;
	dev->caps.num_qps		= 1 << hca_param->log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param->log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param->log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param->log_mpt_sz;
	dev->caps.num_eqs		= func_cap->max_eq;
	dev->caps.reserved_eqs		= func_cap->reserved_eq;
	dev->caps.reserved_lkey		= func_cap->reserved_lkey;
	dev->caps.num_pds               = MLX4_NUM_PDS;
	dev->caps.num_mgms              = 0;
	dev->caps.num_amgms             = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		err = -ENODEV;
		goto free_mem;
	}

	mlx4_replace_zero_macs(dev);

	err = mlx4_slave_special_qp_cap(dev);
	if (err) {
		mlx4_err(dev, "Set special QP caps failed. aborting\n");
		goto free_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->persist->pdev,
							2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param->eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param->cqe_size;
		/* User still need to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
	mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, dev_cap, hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param->rss_ip_frags ? "on" : "off");

	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

err_mem:
	if (err)
		mlx4_slave_destroy_special_qp_cap(dev);
free_mem:
	kfree(hca_param);
	kfree(func_cap);
	kfree(dev_cap);
	return err;
}
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		return -EOPNOTSUPP;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EOPNOTSUPP;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);

	return err;
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev,
			  "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}
static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		if (!ret)
			ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		if (!ret)
			ret = ret1;
	}
	return ret;
}
int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);
int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);
int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides boundary checks cross mapping makes
		 * no sense and therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->persist->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->persist->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
u64 mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	u64 cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
#define MLX4_CLOCK_SIZE	8

static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->persist->pdev,
					   priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}
int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
				   struct mlx4_clock_params *params)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (mlx4_is_slave(dev))
		return -EOPNOTSUPP;

	if (!params)
		return -EINVAL;

	params->bar = priv->fw.clock_bar;
	params->offset = priv->fw.clock_offset;
	params->size = MLX4_CLOCK_SIZE;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
static int mlx4_comm_check_offline(struct mlx4_dev *dev)
{
#define COMM_CHAN_OFFLINE_OFFSET  0x09

	u32 comm_flags;
	u32 offline_bit;
	unsigned long end;
	struct mlx4_priv *priv = mlx4_priv(dev);

	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
					  MLX4_COMM_CHAN_FLAGS));
		offline_bit = (comm_flags &
			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
		if (!offline_bit)
			return 0;

		/* If device removal has been requested,
		 * do not continue retrying.
		 */
		if (dev->persist->interface_state &
		    MLX4_INTERFACE_STATE_NOWAIT)
			break;

		/* There are cases as part of AER/Reset flow that PF needs
		 * around 100 msec to load. We therefore sleep for 100 msec
		 * to allow other tasks to make use of that CPU during this
		 * time interval.
		 */
		msleep(100);
	}
	mlx4_err(dev, "Communication channel is offline.\n");
	return -EIO;
}
static void mlx4_reset_vf_support(struct mlx4_dev *dev)
{
#define COMM_CHAN_RST_OFFSET 0x1e

	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 comm_rst;
	u32 comm_caps;

	comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
				 MLX4_COMM_CHAN_CAPS));
	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));

	if (comm_rst)
		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
}
2008 static int mlx4_init_slave(struct mlx4_dev
*dev
)
2010 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2011 u64 dma
= (u64
) priv
->mfunc
.vhcr_dma
;
2012 int ret_from_reset
= 0;
2014 u32 cmd_channel_ver
;
2016 if (atomic_read(&pf_loading
)) {
2017 mlx4_warn(dev
, "PF is not ready - Deferring probe\n");
2018 return -EPROBE_DEFER
;
2021 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
2022 priv
->cmd
.max_cmds
= 1;
2023 if (mlx4_comm_check_offline(dev
)) {
2024 mlx4_err(dev
, "PF is not responsive, skipping initialization\n");
2028 mlx4_reset_vf_support(dev
);
2029 mlx4_warn(dev
, "Sending reset\n");
2030 ret_from_reset
= mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0,
2031 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
);
2032 /* if we are in the middle of flr the slave will try
2033 * NUM_OF_RESET_RETRIES times before leaving.*/
2034 if (ret_from_reset
) {
2035 if (MLX4_DELAY_RESET_SLAVE
== ret_from_reset
) {
2036 mlx4_warn(dev
, "slave is currently in the middle of FLR - Deferring probe\n");
2037 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
2038 return -EPROBE_DEFER
;
2043 /* check the driver version - the slave I/F revision
2044 * must match the master's */
2045 slave_read
= swab32(readl(&priv
->mfunc
.comm
->slave_read
));
2046 cmd_channel_ver
= mlx4_comm_get_version();
2048 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver
) !=
2049 MLX4_COMM_GET_IF_REV(slave_read
)) {
2050 mlx4_err(dev
, "slave driver version is not supported by the master\n");
2054 mlx4_warn(dev
, "Sending vhcr0\n");
2055 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR0
, dma
>> 48,
2056 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
2058 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR1
, dma
>> 32,
2059 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
2061 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR2
, dma
>> 16,
2062 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
2064 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR_EN
, dma
,
2065 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
2068 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
2072 mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0, MLX4_COMM_CMD_NA_OP
, 0);
2074 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
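
/*
 * Editor's sketch (illustrative only; MLX4_DOC_EXAMPLES is never defined).
 * The VHCR0..VHCR_EN handshake in mlx4_init_slave() above passes the 64-bit
 * VHCR DMA address to the master 16 bits at a time, most significant chunk
 * first, because a comm-channel command only carries a 16-bit parameter.
 * The split, spelled out with a hypothetical helper:
 */
#ifdef MLX4_DOC_EXAMPLES
static void split_vhcr_dma(u64 dma, u16 chunk[4])
{
	chunk[0] = dma >> 48;	/* sent with MLX4_COMM_CMD_VHCR0 */
	chunk[1] = dma >> 32;	/* sent with MLX4_COMM_CMD_VHCR1 */
	chunk[2] = dma >> 16;	/* sent with MLX4_COMM_CMD_VHCR2 */
	chunk[3] = dma;		/* low 16 bits, sent with MLX4_COMM_CMD_VHCR_EN */
}
#endif /* MLX4_DOC_EXAMPLES */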
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	     i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
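
/*
 * Editor's worked example for choose_log_fs_mgm_entry_size() above: the
 * capacity of a 2^i-byte MGM entry is 4 * ((1 << i) / 16 - 2) QPs, so
 * i = 7 gives 4 * (8 - 2) = 24 and i = 10 gives 4 * (64 - 2) = 248; the
 * loop returns the smallest i whose capacity covers qp_per_entry, or -1
 * when even the largest entry size cannot.
 */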
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";
	default:
		return "Unrecognized mode";
	}
}
#define MLX4_DMFS_A0_STEERING			(1UL << 2)

static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
			else
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;
		}
	}

	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >=
	     (dev->persist->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}

		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}

	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}
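
/*
 * Editor's sketch (illustrative only; MLX4_DOC_EXAMPLES is never defined):
 * when the log_num_mgm_entry_size module parameter is zero or negative,
 * choose_steering_mode() above reads it as a request bitmask on its
 * absolute value; bit 2 (MLX4_DMFS_A0_STEERING) asks for static DMFS A0
 * steering. A hypothetical predicate spelling out that decoding:
 */
#ifdef MLX4_DOC_EXAMPLES
static bool modparam_requests_dmfs_a0(int log_num_mgm_entry_size)
{
	return log_num_mgm_entry_size <= 0 &&
	       ((-log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING);
}
#endif /* MLX4_DOC_EXAMPLES */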
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
{
	int i;
	struct mlx4_port_cap port_cap;

	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
		return -EINVAL;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_dev_port(dev, i, &port_cap)) {
			mlx4_err(dev,
				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
		} else if ((dev->caps.dmfs_high_steer_mode !=
			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
			   (port_cap.dmfs_optimized_state ==
			    !!(dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE))) {
			mlx4_err(dev,
				 "DMFS high rate steer mode differs, driver requested %s but %s in FW.\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode),
				 (port_cap.dmfs_optimized_state ?
					"enabled" : "disabled"));
		}
	}

	return 0;
}

static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			return err;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		if (enable_4k_uar || !dev->persist->num_vfs) {
			init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
						    PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
			init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
		} else {
			init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
			init_hca.uar_page_sz = PAGE_SHIFT - 12;
		}

		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			return err;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
			if (mlx4_validate_optimized_steering(dev))
				mlx4_warn(dev, "Optimized steering validation failed\n");

			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE) {
				dev->caps.dmfs_high_rate_qpn_base =
					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
				dev->caps.dmfs_high_rate_qpn_range =
					MLX4_A0_STEERING_TABLE_SIZE;
			}

			mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
				  dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode));
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/*Only the master set the ports, all the rest got it from it.*/
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -EOPNOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev))
		mlx4_slave_destroy_special_qp_cap(dev);

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

	return err;
}
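
/*
 * Editor's note on the UAR sizing branch in mlx4_init_hca() above: the
 * device encodes uar_page_sz as log2(page size) - 12, so 0 means 4 KB.
 * With 4K UARs enabled (or no VFs present), the UAR page stays at
 * DEFAULT_UAR_PAGE_SHIFT and log_uar_sz is enlarged by
 * PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT, keeping the total UAR area the same
 * on kernels whose pages are larger than 4 KB; otherwise the UAR page
 * simply tracks the kernel's PAGE_SHIFT.
 */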
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent_pow2;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	if (!dev->caps.max_counters)
		return -ENOSPC;

	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
	/* reserve last counter index for sink counter */
	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
				nent_pow2 - 1, 0,
				nent_pow2 - dev->caps.max_counters + 1);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (!dev->caps.max_counters)
		return;

	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	for (port = 0; port < dev->caps.num_ports; port++)
		if (priv->def_counter[port] != -1)
			mlx4_counter_free(dev, priv->def_counter[port]);
}

static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port, err = 0;
	u32 idx;

	for (port = 0; port < dev->caps.num_ports; port++)
		priv->def_counter[port] = -1;

	for (port = 0; port < dev->caps.num_ports; port++) {
		err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);

		if (!err || err == -ENOSPC) {
			priv->def_counter[port] = idx;
			err = 0;
		} else if (err == -ENOENT) {
			err = 0;
			continue;
		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
				  MLX4_SINK_COUNTER_INDEX(dev));
			err = 0;
		} else {
			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
				 __func__, port + 1, err);
			mlx4_cleanup_default_counters(dev);
			return err;
		}

		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
			 __func__, priv->def_counter[port], port + 1);
	}

	return err;
}
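
/*
 * Editor's worked example for mlx4_init_counters_table() above: the bitmap
 * is sized up to a power of two and the padding plus one extra slot is
 * reserved at the top, keeping the last usable index free to act as the
 * sink counter. E.g. max_counters = 40 gives nent_pow2 = 64 and
 * 64 - 40 + 1 = 25 reserved top entries, leaving 39 allocatable indexes.
 */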
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1) {
		*idx = MLX4_SINK_COUNTER_INDEX(dev);
		return -ENOSPC;
	}

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
{
	u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
				u8 counter_index)
{
	struct mlx4_cmd_mailbox *if_stat_mailbox;
	int err;
	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;

	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(if_stat_mailbox))
		return PTR_ERR(if_stat_mailbox);

	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
	return err;
}

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
		return;

	__mlx4_clear_if_stat(dev, idx);

	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->def_counter[port - 1];
}
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);

void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);

__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
}
EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);

void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 guid;

	get_random_bytes((char *)&guid, sizeof(guid));
	guid &= ~(cpu_to_be64(1ULL << 56));
	guid |= cpu_to_be64(1ULL << 57);
	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_random_admin_guid);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_counters_table(dev);
		if (err && err != -ENOENT) {
			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_allocate_default_counters(dev);
	if (err) {
		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
		goto err_counters_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;

				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_default_counters_free;
			}
		}
	}

	return 0;

err_default_counters_free:
	mlx4_cleanup_default_counters(dev);

err_counters_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
{
	int requested_cpu = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eq *eq;
	int off = 0;
	int i;

	if (eqn > dev->caps.num_comp_vectors)
		return -EINVAL;

	for (i = 1; i < port; i++)
		off += mlx4_get_eqs_per_port(dev, i);

	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);

	/* Meaning EQs are shared, and this call comes from the second port */
	if (requested_cpu < 0)
		return 0;

	eq = &priv->eq_table.eq[eqn];

	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(requested_cpu, eq->affinity_mask);

	return 0;
}
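
/*
 * Editor's worked example for the offset arithmetic in
 * mlx4_init_affinity_hint() above: the hinted CPU is the EQ's position
 * inside its port's block of vectors, shifted down by one once the vector
 * index passes the async EQ. With MLX4_EQ_ASYNC == 0, four EQs assigned to
 * port 1 and eqn == 6 belonging to port 2: off = 4 and
 * requested_cpu = 6 - 4 - 1 = 1, i.e. the second CPU.
 */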
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;
	int port = 0;

	if (msi_x) {
		int nreq = min3(dev->caps.num_ports *
				(int)num_online_cpus() + 1,
				dev->caps.num_eqs - dev->caps.reserved_eqs,
				MAX_MSIX);

		entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
					     nreq);

		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
			kfree(entries);
			goto no_msi;
		}
		/* 1 is reserved for events (asynchronous EQ) */
		dev->caps.num_comp_vectors = nreq - 1;

		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
			    dev->caps.num_ports);

		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
			if (i == MLX4_EQ_ASYNC)
				continue;

			priv->eq_table.eq[i].irq =
				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;

			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
					    dev->caps.num_ports);
				/* We don't set affinity hint when there
				 * aren't enough EQs
				 */
			} else {
				set_bit(port,
					priv->eq_table.eq[i].actv_ports.ports);
				if (mlx4_init_affinity_hint(dev, port + 1, i))
					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
						  i);

				/* We divide the Eqs evenly between the two ports.
				 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
				 * refers to the number of Eqs per port
				 * (i.e eqs_per_port). Theoretically, we would like to
				 * write something like (i + 1) % eqs_per_port == 0.
				 * However, since there's an asynchronous Eq, we have
				 * to skip over it by comparing this condition to
				 * !!((i + 1) > MLX4_EQ_ASYNC).
				 */
				if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
				    ((i + 1) %
				     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
				    !!((i + 1) > MLX4_EQ_ASYNC))
					/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
					 * everything is shared anyway.
					 */
					port++;
			}
		}

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	BUG_ON(MLX4_EQ_ASYNC >= 2);
	for (i = 0; i < 2; ++i) {
		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
		if (i != MLX4_EQ_ASYNC) {
			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
				    dev->caps.num_ports);
		}
	}
}
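
/*
 * Editor's sketch (illustrative only; MLX4_DOC_EXAMPLES is never defined):
 * the min3() budget at the top of mlx4_enable_msi_x() above requests one
 * vector per online CPU per port plus one for the async EQ, clamped by the
 * EQs the device actually exposes and by the driver-wide MAX_MSIX pool.
 * Equivalent open-coded form, with hypothetical parameters:
 */
#ifdef MLX4_DOC_EXAMPLES
static int msix_vectors_wanted(int num_ports, int online_cpus,
			       int num_eqs, int reserved_eqs, int max_msix)
{
	int nreq = num_ports * online_cpus + 1;

	if (nreq > num_eqs - reserved_eqs)
		nreq = num_eqs - reserved_eqs;
	if (nreq > max_msix)
		nreq = max_msix;
	return nreq;
}
#endif /* MLX4_DOC_EXAMPLES */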
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err;

	err = devlink_port_register(devlink, &info->devlink_port, port);
	if (err)
		return err;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev)) {
		info->port_attr.attr.mode = 0444;
	} else {
		info->port_attr.attr.mode = 0644;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		devlink_port_unregister(&info->devlink_port);
		info->port = -1;
		return err;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev)) {
		info->port_mtu_attr.attr.mode = 0444;
	} else {
		info->port_mtu_attr.attr.mode = 0644;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev,
				 &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->persist->pdev->dev,
				   &info->port_attr);
		devlink_port_unregister(&info->devlink_port);
		info->port = -1;
		return err;
	}

	return 0;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->persist->pdev->dev,
			   &info->port_mtu_attr);
	devlink_port_unregister(&info->devlink_port);

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(info->rmap);
	info->rmap = NULL;
#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
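
/*
 * Editor's note on extended_func_num() above: PCI_SLOT() * 8 + PCI_FUNC()
 * simply reconstitutes the flat devfn value (devfn is slot << 3 | func),
 * giving a function index that keeps counting past 7 when SR-IOV spills
 * virtual functions into higher slot numbers; e.g. slot 2, function 5
 * yields 2 * 8 + 5 = 21.
 */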
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->persist->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV)	==\
				  !!((flags) & MLX4_FLAG_MASTER))

static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs, int reset_flow)
{
	u64 dev_flags = dev->flags;
	int err = 0;
	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
					MLX4_MAX_NUM_VF);

	if (reset_flow) {
		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
				       GFP_KERNEL);
		if (!dev->dev_vfs)
			goto free_mem;
		return dev_flags;
	}

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
				 existing_vfs, total_vfs);
			total_vfs = existing_vfs;
		}
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	}

	if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		if (total_vfs > fw_enabled_sriov_vfs) {
			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
				 total_vfs, fw_enabled_sriov_vfs);
			err = -ENOMEM;
			goto disable_sriov;
		}
		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
		err = pci_enable_sriov(pdev, total_vfs);
	}
	if (err) {
		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
			 err);
		goto disable_sriov;
	} else {
		mlx4_warn(dev, "Running in master mode\n");
		dev_flags |= MLX4_FLAG_SRIOV |
			MLX4_FLAG_MASTER;
		dev_flags &= ~MLX4_FLAG_SLAVE;
		dev->persist->num_vfs = total_vfs;
	}
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);
free_mem:
	dev->persist->num_vfs = 0;
	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;
	return dev_flags & ~MLX4_FLAG_MASTER;
}
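
/*
 * Editor's note on SRIOV_VALID_STATE() above: the macro checks that
 * MLX4_FLAG_SRIOV and MLX4_FLAG_MASTER are either both set or both clear -
 * a function that enabled VFs must be the master, and a slave must never
 * carry the SR-IOV flag; any mixed state is treated as corruption by the
 * callers in mlx4_load_one().
 */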
enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}

static int mlx4_pci_enable_device(struct mlx4_dev *dev)
{
	struct pci_dev *pdev = dev->persist->pdev;
	int err = 0;

	mutex_lock(&dev->persist->pci_status_mutex);
	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->persist->pci_status_mutex);

	return err;
}

static void mlx4_pci_disable_device(struct mlx4_dev *dev)
{
	struct pci_dev *pdev = dev->persist->pdev;

	mutex_lock(&dev->persist->pci_status_mutex);
	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->persist->pci_status_mutex);
}
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
			 int reset_flow)
{
	struct mlx4_dev *dev;
	unsigned int sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);
	mutex_init(&priv->bond_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);
	spin_lock_init(&priv->cmd.context_lock);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			dev->flags = MLX4_FLAG_MASTER;
			existing_vfs = pci_num_vf(pdev);
			if (existing_vfs)
				dev->flags |= MLX4_FLAG_SRIOV;
			dev->persist->num_vfs = total_vfs;
		}
	}

	/* on load remove any previous indication of internal error,
	 * device is up.
	 */
	dev->persist->state = MLX4_DEVICE_STATE_UP;

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);

			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
								  total_vfs,
								  existing_vfs,
								  reset_flow);

				mlx4_close_fw(dev);
				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev) && !reset_flow)
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
						  existing_vfs, reset_flow);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));

		for (i = 0;
		     i < sizeof(dev->persist->nvfs)/
				sizeof(dev->persist->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -EOPNOTSUPP;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	mlx4_init_quotas(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	/* When PF resources are ready arm its comm channel to enable
	 * getting commands
	 */
	if (mlx4_is_master(dev)) {
		err = mlx4_ARM_COMM_CHANNEL(dev);
		if (err) {
			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
				 err);
			goto err_steer;
		}
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	priv->v2p.port1 = 1;
	priv->v2p.port2 = 2;

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev)) {
		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
		mlx4_multi_func_cleanup(dev);
	}

	if (mlx4_is_slave(dev))
		mlx4_slave_destroy_special_qp_cap(dev);

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
	}

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = mlx4_pci_enable_device(&priv->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since there are
	 * 128 MACs)
	 */
	for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs > MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed by hw (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < ARRAY_SIZE(nvfs) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == ARRAY_SIZE(nvfs)) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_catas_init(&priv->dev);
	if (err)
		goto err_release_regions;

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
	if (err)
		goto err_catas;

	return 0;

err_catas:
	mlx4_catas_end(&priv->dev);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	mlx4_pci_disable_device(&priv->dev);
	return err;
}
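
/*
 * Editor's worked example for the param_map[][] table in __mlx4_init_one()
 * above: the row is picked by how many comma-separated values were passed
 * to num_vfs/probe_vf, and each positional value is routed to its slot in
 * nvfs[] = { port1-only, port2-only, both-ports }:
 *   one value    -> row {2, 0, 0}: the value counts dual-port VFs;
 *   two values   -> row {0, 1, 2}: port1, then port2;
 *   three values -> row {0, 1, 2}: port1, port2, then both ports.
 * So "num_vfs=1,2,3" yields nvfs[] = {1, 2, 3} and total_vfs = 6.
 */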
static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
				      enum devlink_port_type port_type)
{
	struct mlx4_port_info *info = container_of(devlink_port,
						   struct mlx4_port_info,
						   devlink_port);
	enum mlx4_port_type mlx4_port_type;

	switch (port_type) {
	case DEVLINK_PORT_TYPE_AUTO:
		mlx4_port_type = MLX4_PORT_TYPE_AUTO;
		break;
	case DEVLINK_PORT_TYPE_ETH:
		mlx4_port_type = MLX4_PORT_TYPE_ETH;
		break;
	case DEVLINK_PORT_TYPE_IB:
		mlx4_port_type = MLX4_PORT_TYPE_IB;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return __set_port_type(info, mlx4_port_type);
}

static const struct devlink_ops mlx4_devlink_ops = {
	.port_type_set	= mlx4_devlink_port_type_set,
};

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct devlink *devlink;
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);

	dev = &priv->dev;
	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
	if (!dev->persist) {
		ret = -ENOMEM;
		goto err_devlink_free;
	}
	dev->persist->pdev = pdev;
	dev->persist->dev = dev;
	pci_set_drvdata(pdev, dev->persist);
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
	mutex_init(&dev->persist->pci_status_mutex);

	ret = devlink_register(devlink, &pdev->dev);
	if (ret)
		goto err_persist_free;

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		goto err_devlink_unregister;

	pci_save_state(pdev);
	return 0;

err_devlink_unregister:
	devlink_unregister(devlink);
err_persist_free:
	kfree(dev->persist);
err_devlink_free:
	devlink_free(devlink);
	return ret;
}
static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}

static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev  *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int               pci_dev_data;
	int p, i;

	/* saving current ports type for further use */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] = dev->caps.
						       possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	mlx4_slave_destroy_special_qp_cap(dev);
	kfree(dev->dev_vfs);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev  *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct devlink *devlink = priv_to_devlink(priv);
	int active_vfs = 0;

	if (mlx4_is_slave(dev))
		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* device marked to be under deletion running now without the lock
	 * letting other tasks to be terminated
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	mlx4_pci_disable_device(dev);
	devlink_unregister(devlink);
	kfree(dev->persist);
	devlink_free(devlink);
}

static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mlx4_start_sense(dev);
	mutex_unlock(&priv->port_mutex);

	return err;
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}

#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }

static const struct pci_device_id mlx4_pci_table[] = {
#ifdef CONFIG_MLX4_CORE_GEN2
	/* MT25408 "Hermon" */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2),	/* DDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2),	/* QDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),	/* EN 10GigE Gen2 */
	/* MT25458 ConnectX EN 10GBASE-T */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
	/* MT26468 ConnectX EN 10GigE PCIe Gen2*/
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
	/* MT25400 Family [ConnectX-2] */
	MLX_VF(0x1002),					/* Virtual Function */
#endif /* CONFIG_MLX4_CORE_GEN2 */
	/* MT27500 Family [ConnectX-3] */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
	MLX_VF(0x1004),					/* Virtual Function */
	MLX_GN(0x1005),					/* MT27510 Family */
	MLX_GN(0x1006),					/* MT27511 Family */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
	MLX_GN(0x1008),					/* MT27521 Family */
	MLX_GN(0x1009),					/* MT27530 Family */
	MLX_GN(0x100a),					/* MT27531 Family */
	MLX_GN(0x100b),					/* MT27540 Family */
	MLX_GN(0x100c),					/* MT27541 Family */
	MLX_GN(0x100d),					/* MT27550 Family */
	MLX_GN(0x100e),					/* MT27551 Family */
	MLX_GN(0x100f),					/* MT27560 Family */
	MLX_GN(0x1010),					/* MT27561 Family */

	/*
	 * See the mellanox_check_broken_intx_masking() quirk when
	 * adding devices
	 */

	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);

	mutex_unlock(&persist->interface_state_mutex);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev  = persist->dev;
	int err;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	err = mlx4_pci_enable_device(dev);
	if (err) {
		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void mlx4_pci_resume(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int total_vfs;
	int err;

	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (err) {
			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
				 __func__, err);
			goto end;
		}

		err = restore_current_port_types(dev, dev->persist->
						 curr_port_type, dev->persist->
						 curr_port_poss_type);
		if (err)
			mlx4_err(dev, "could not restore original port types (%d)\n", err);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
}
static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
	.resume		= mlx4_pci_resume,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_shutdown,
	.remove		= mlx4_remove_one,
	.err_handler    = &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);