/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
                   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
                                         " of qp per mcg, for example:"
                                         " 10 gives 248. Range: 9 <="
                                         " log_num_mgm_entry_size <= 12."
                                         " Not in use with device managed"
                                         " flow steering");
#define MLX4_VF                         (1 << 0)

#define HCA_GLOBAL_CAP_MASK             0
#define PF_CONTEXT_BEHAVIOUR_MASK       0
static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
        .num_qp         = 1 << 18,
        .num_srq        = 1 << 16,
        .rdmarc_per_qp  = 1 << 4,
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
        .num_mpt        = 1 << 19,
        .num_mtt        = 1 << 20, /* It is really num mtt segments */
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                 "(0/1, default 0)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
                                  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
        struct list_head list;
        enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
        struct pci_dev *pdev;
};
int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
{
        int i;

        for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
                                mlx4_err(dev, "Only same port types supported "
                                         "on this HCA, aborting.\n");
                                return -EINVAL;
                        }
                        if (port_type[i] == MLX4_PORT_TYPE_ETH &&
                            port_type[i + 1] == MLX4_PORT_TYPE_IB)
                                return -EINVAL;
                }
        }

        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
                        mlx4_err(dev, "Requested port type for port %d is not "
                                 "supported on this HCA\n", i + 1);
                        return -EINVAL;
                }
        }
        return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
        int i;

        for (i = 1; i <= dev->caps.num_ports; ++i)
                dev->caps.port_mask[i] = dev->caps.port_type[i];
}
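/*
 * Query device capabilities from firmware (QUERY_DEV_CAP) and cache them
 * in dev->caps, sanity-checking the minimum page size, port count and
 * UAR size against what the kernel and PCI BAR 2 can support.  This is
 * also where the steering mode (device-managed, B0 or A0) and the
 * initial IB/ETH type of each port are chosen.
 */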
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
        int err;
        int i;

        err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }

        if (dev_cap->min_page_sz > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than "
                         "kernel PAGE_SIZE of %ld, aborting.\n",
                         dev_cap->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_cap->num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n",
                         dev_cap->num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }

        if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev_cap->uar_size,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
        }

        dev->caps.num_ports          = dev_cap->num_ports;
        dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
                dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
                dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
                dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
                dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
                dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
                dev->caps.def_mac[i]        = dev_cap->def_mac[i];
                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
                dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
                dev->caps.default_sense[i]  = dev_cap->default_sense[i];
                dev->caps.trans_type[i]     = dev_cap->trans_type[i];
                dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
                dev->caps.wavelength[i]     = dev_cap->wavelength[i];
                dev->caps.trans_code[i]     = dev_cap->trans_code[i];
        }

        dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
        dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
        dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
        dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
        dev->caps.max_wqes           = dev_cap->max_qp_sz;
        dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
        dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
        dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
        dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
         * empty CQ and a full CQ.
         */
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
        dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;

        /* The first 128 UARs are used for EQ doorbells */
        dev->caps.reserved_uars      = max_t(int, 128, dev_cap->reserved_uars);
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
        dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->reserved_xrcds : 0;
        dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->max_xrcds : 0;
        dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
        dev->caps.flags2             = dev_cap->flags2;
        dev->caps.bmme_flags         = dev_cap->bmme_flags;
        dev->caps.reserved_lkey      = dev_cap->reserved_lkey;
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

        if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
                dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
                dev->caps.fs_log_max_ucast_qp_range_size =
                        dev_cap->fs_log_max_ucast_qp_range_size;
        } else {
                if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
                    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
                        dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
                } else {
                        dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
                                mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
                                          "set to use B0 steering. Falling back to A0 steering mode.\n");
                }
                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
        }
        mlx4_dbg(dev, "Steering mode is: %s\n",
                 mlx4_steering_mode_str(dev->caps.steering_mode));

        /* Sense port is always allowed on supported devices for ConnectX-1 and -2 */
        if (dev->pdev->device != 0x1003)
                dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
        dev->caps.log_num_prios = use_prio ? 3 : 0;

        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
                if (dev->caps.supported_type[i]) {
                        /* if only ETH is supported - assign ETH */
                        if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
                                dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
                        /* if only IB is supported,
                         * assign IB only if SRIOV is off */
                        else if (dev->caps.supported_type[i] ==
                                 MLX4_PORT_TYPE_IB) {
                                if (dev->flags & MLX4_FLAG_SRIOV)
                                        dev->caps.port_type[i] =
                                                MLX4_PORT_TYPE_NONE;
                                else
                                        dev->caps.port_type[i] =
                                                MLX4_PORT_TYPE_IB;

                        /* if IB and ETH are supported,
                         * first of all check if SRIOV is on */
                        } else if (dev->flags & MLX4_FLAG_SRIOV)
                                dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
                        else {
                                /* In non-SRIOV mode, we set the port type
                                 * according to user selection of port type;
                                 * if user selected none, take the FW hint */
                                if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
                                        dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
                                                MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
                                else
                                        dev->caps.port_type[i] = port_type_array[i - 1];
                        }
                }
                /*
                 * Link sensing is allowed on the port if 3 conditions are true:
                 * 1. Both protocols are supported on the port.
                 * 2. Different types are supported on the port.
                 * 3. FW declared that it supports link sensing.
                 */
                mlx4_priv(dev)->sense.sense_allowed[i] =
                        ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
                         (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
                         (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

                /*
                 * If the "default_sense" bit is set, we move the port to "AUTO"
                 * mode and perform a sense_port FW command to try and set the
                 * correct port type from the beginning.
                 */
                if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
                        enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
                        dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
                        mlx4_SENSE_PORT(dev, i, &sensed_port);
                        if (sensed_port != MLX4_PORT_TYPE_NONE)
                                dev->caps.port_type[i] = sensed_port;
                } else {
                        dev->caps.possible_type[i] = dev->caps.port_type[i];
                }

                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
                        mlx4_warn(dev, "Requested number of MACs is too much "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_macs);
                }
                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
                        mlx4_warn(dev, "Requested number of VLANs is too much "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_vlans);
                }
        }

        dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
                (1 << dev->caps.log_num_macs) *
                (1 << dev->caps.log_num_vlans) *
                (1 << dev->caps.log_num_prios) *
                dev->caps.num_ports;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

        dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

        return 0;
}
/* Check whether any VFs are still alive and return how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state;
        int i;
        int ret = 0;

        for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
                s_state = &priv->mfunc.master.slave_state[i];
                if (s_state->active && s_state->last_cmd !=
                    MLX4_COMM_CMD_RESET) {
                        mlx4_warn(dev, "%s: slave: %d is still active\n",
                                  __func__, i);
                        ret++;
                }
        }
        return ret;
}
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_slave;

        if (!mlx4_is_master(dev))
                return 0;

        s_slave = &priv->mfunc.master.slave_state[slave];
        return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
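/*
 * Capability discovery for a slave (VF) function.  Rather than trusting
 * QUERY_DEV_CAP alone, a slave takes its resource quotas (QPs, SRQs,
 * CQs, EQs, MPTs, MTTs) from QUERY_FUNC_CAP and inherits the UAR page
 * size that the PF programmed, as reported by QUERY_HCA.
 */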
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
        int                        err;
        u32                        page_size;
        struct mlx4_dev_cap        dev_cap;
        struct mlx4_func_cap       func_cap;
        struct mlx4_init_hca_param hca_param;
        int                        i;

        memset(&hca_param, 0, sizeof(hca_param));
        err = mlx4_QUERY_HCA(dev, &hca_param);
        if (err) {
                mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
                return err;
        }

        /* fail if the HCA has an unknown global capability */
        if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
            HCA_GLOBAL_CAP_MASK) {
                mlx4_err(dev, "Unknown hca global capabilities\n");
                return -ENOSYS;
        }

        mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

        memset(&dev_cap, 0, sizeof(dev_cap));
        dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }

        err = mlx4_QUERY_FW(dev);
        if (err)
                mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than "
                         "kernel PAGE_SIZE of %ld, aborting.\n",
                         page_size, PAGE_SIZE);
                return -ENODEV;
        }

        /* slave gets uar page size from QUERY_HCA fw command */
        dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

        /* TODO: relax this assumption */
        if (dev->caps.uar_page_size != PAGE_SIZE) {
                mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
                         dev->caps.uar_page_size, PAGE_SIZE);
                return -ENODEV;
        }

        memset(&func_cap, 0, sizeof(func_cap));
        err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
        if (err) {
                mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
                return err;
        }

        if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
            PF_CONTEXT_BEHAVIOUR_MASK) {
                mlx4_err(dev, "Unknown pf context behaviour\n");
                return -ENOSYS;
        }

        dev->caps.num_ports     = func_cap.num_ports;
        dev->caps.num_qps       = func_cap.qp_quota;
        dev->caps.num_srqs      = func_cap.srq_quota;
        dev->caps.num_cqs       = func_cap.cq_quota;
        dev->caps.num_eqs       = func_cap.max_eq;
        dev->caps.reserved_eqs  = func_cap.reserved_eq;
        dev->caps.num_mpts      = func_cap.mpt_quota;
        dev->caps.num_mtts      = func_cap.mtt_quota;
        dev->caps.num_pds       = MLX4_NUM_PDS;
        dev->caps.num_mgms      = 0;
        dev->caps.num_amgms     = 0;

        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }

        for (i = 1; i <= dev->caps.num_ports; ++i)
                dev->caps.port_mask[i] = dev->caps.port_type[i];

        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
            pci_resource_len(dev->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
        }

        return 0;
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_types)
{
        int err = 0;
        int change = 0;
        int port;

        for (port = 0; port < dev->caps.num_ports; port++) {
                /* Change the port type only if the new type is different
                 * from the current, and not set to Auto */
                if (port_types[port] != dev->caps.port_type[port + 1])
                        change = 1;
        }
        if (change) {
                mlx4_unregister_device(dev);
                for (port = 1; port <= dev->caps.num_ports; port++) {
                        mlx4_CLOSE_PORT(dev, port);
                        dev->caps.port_type[port] = port_types[port - 1];
                        err = mlx4_SET_PORT(dev, port);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, "
                                              "aborting\n", port);
                                goto out;
                        }
                }
                mlx4_set_port_mask(dev);
                err = mlx4_register_device(dev);
        }

out:
        return err;
}
static ssize_t show_port_type(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        char type[8];

        sprintf(type, "%s",
                (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
                "ib" : "eth");
        if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
                sprintf(buf, "auto (%s)\n", type);
        else
                sprintf(buf, "%s\n", type);

        return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        enum mlx4_port_type types[MLX4_MAX_PORTS];
        enum mlx4_port_type new_types[MLX4_MAX_PORTS];
        int i;
        int err = 0;

        if (!strcmp(buf, "ib\n"))
                info->tmp_type = MLX4_PORT_TYPE_IB;
        else if (!strcmp(buf, "eth\n"))
                info->tmp_type = MLX4_PORT_TYPE_ETH;
        else if (!strcmp(buf, "auto\n"))
                info->tmp_type = MLX4_PORT_TYPE_AUTO;
        else {
                mlx4_err(mdev, "%s is not supported port type\n", buf);
                return -EINVAL;
        }

        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
        /* Possible type is always the one that was delivered */
        mdev->caps.possible_type[info->port] = info->tmp_type;

        for (i = 0; i < mdev->caps.num_ports; i++) {
                types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
                                        mdev->caps.possible_type[i+1];
                if (types[i] == MLX4_PORT_TYPE_AUTO)
                        types[i] = mdev->caps.port_type[i+1];
        }

        if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
            !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
                for (i = 1; i <= mdev->caps.num_ports; i++) {
                        if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
                                mdev->caps.possible_type[i] = mdev->caps.port_type[i];
                                err = -EINVAL;
                        }
                }
        }
        if (err) {
                mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
                               "Set only 'eth' or 'ib' for both ports "
                               "(should be the same)\n");
                goto out;
        }

        mlx4_do_sense_ports(mdev, new_types, types);

        err = mlx4_check_port_params(mdev, new_types);
        if (err)
                goto out;

        /* We are about to apply the changes after the configuration
         * was verified, no need to remember the temporary types
         * any more */
        for (i = 0; i < mdev->caps.num_ports; i++)
                priv->port[i + 1].tmp_type = 0;

        err = mlx4_change_port_types(mdev, new_types);

out:
        mlx4_start_sense(mdev);
        mutex_unlock(&priv->port_mutex);
        return err ? err : count;
}
enum ibta_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
        switch (mtu) {
        case 256:  return IB_MTU_256;
        case 512:  return IB_MTU_512;
        case 1024: return IB_MTU_1024;
        case 2048: return IB_MTU_2048;
        case 4096: return IB_MTU_4096;
        default: return -1;
        }
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default: return -1;
        }
}
static ssize_t show_port_ib_mtu(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_mtu_attr);
        struct mlx4_dev *mdev = info->dev;

        if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
                mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

        sprintf(buf, "%d\n",
                ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
        return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_mtu_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        int err, port, mtu, ibta_mtu = -1;

        if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
                mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
                return -EINVAL;
        }

        err = sscanf(buf, "%d", &mtu);
        if (err > 0)
                ibta_mtu = int_to_ibta_mtu(mtu);

        if (err <= 0 || ibta_mtu < 0) {
                mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
                return -EINVAL;
        }

        mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
        mlx4_unregister_device(mdev);
        for (port = 1; port <= mdev->caps.num_ports; port++) {
                mlx4_CLOSE_PORT(mdev, port);
                err = mlx4_SET_PORT(mdev, port);
                if (err) {
                        mlx4_err(mdev, "Failed to set port %d, "
                                       "aborting\n", port);
                        goto err_set_port;
                }
        }
        err = mlx4_register_device(mdev);
err_set_port:
        mutex_unlock(&priv->port_mutex);
        mlx4_start_sense(mdev);
        return err ? err : count;
}
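/*
 * Allocate the firmware area in host memory, hand it to the HCA with
 * MAP_FA, and then start the firmware with RUN_FW.  On any failure the
 * mapping and the ICM allocation are unwound before returning.
 */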
static int mlx4_load_fw(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
                mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
                return -ENOMEM;
        }

        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
                mlx4_err(dev, "MAP_FA command failed, aborting.\n");
                goto err_free;
        }

        err = mlx4_RUN_FW(dev);
        if (err) {
                mlx4_err(dev, "RUN_FW command failed, aborting.\n");
                goto err_unmap_fa;
        }

        return 0;

err_unmap_fa:
        mlx4_UNMAP_FA(dev);

err_free:
        mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        return err;
}
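/*
 * Map the cMPT (central MPT) ICM regions for QPs, SRQs, CQs and EQs.
 * Each resource type lives at a fixed offset from cmpt_base, computed
 * as (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT.
 */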
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
                                int cmpt_entry_sz)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int num_eqs;

        err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_QP *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err)
                goto err;

        err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_SRQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err)
                goto err_qp;

        err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_CQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err)
                goto err_srq;

        num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
                  dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
        if (err)
                goto err_cq;

        return 0;

err_cq:
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
        return err;
}
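/*
 * Size and map the HCA's context memory (ICM): ask firmware how much
 * auxiliary ICM the chosen profile needs (SET_ICM_SIZE), map the aux
 * area, and then map every context table (cMPT, EQ, MTT, dMPT, QP,
 * AUXC, ALTC, RDMARC, CQ, SRQ, MCG) in turn, unwinding in reverse
 * order on failure.
 */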
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                         struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 aux_pages;
        int num_eqs;
        int err;

        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
        if (err) {
                mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
                return err;
        }

        mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
                 (unsigned long long) icm_size >> 10,
                 (unsigned long long) aux_pages << 2);

        priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
                                          GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.aux_icm) {
                mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
                return -ENOMEM;
        }

        err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
        if (err) {
                mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
                goto err_free_aux;
        }

        err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
        if (err) {
                mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
                goto err_unmap_aux;
        }

        num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
                  dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  num_eqs, num_eqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
        }

        /*
         * Reserved MTT entries must be aligned up to a cacheline
         * boundary, since the FW will write to them, while the driver
         * writes to all other MTT entries. (The variable
         * dev->caps.mtt_entry_sz below is really the MTT segment
         * size, not the raw entry size)
         */
        dev->caps.reserved_mtts =
                ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
                      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

        err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
                                  init_hca->mtt_base,
                                  dev->caps.mtt_entry_sz,
                                  dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
                goto err_unmap_eq;
        }

        err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
                                  init_hca->dmpt_base,
                                  dev_cap->dmpt_entry_sz,
                                  dev->caps.num_mpts,
                                  dev->caps.reserved_mrws, 1, 1);
        if (err) {
                mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
                goto err_unmap_mtt;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
                                  init_hca->qpc_base,
                                  dev_cap->qpc_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
                goto err_unmap_dmpt;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
                                  init_hca->auxc_base,
                                  dev_cap->aux_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
                goto err_unmap_qp;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
                                  init_hca->altc_base,
                                  dev_cap->altc_entry_sz,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
                goto err_unmap_auxc;
        }

        err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
                                  init_hca->rdmarc_base,
                                  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
                                  dev->caps.num_qps,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
                goto err_unmap_altc;
        }

        err = mlx4_init_icm_table(dev, &priv->cq_table.table,
                                  init_hca->cqc_base,
                                  dev_cap->cqc_entry_sz,
                                  dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
                goto err_unmap_rdmarc;
        }

        err = mlx4_init_icm_table(dev, &priv->srq_table.table,
                                  init_hca->srqc_base,
                                  dev_cap->srq_entry_sz,
                                  dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
                goto err_unmap_cq;
        }

        /*
         * For flow steering device managed mode it is required to use
         * mlx4_init_icm_table. For B0 steering mode it's not strictly
         * required, but for simplicity just map the whole multicast
         * group table now.  The table isn't very big and it's a lot
         * easier than trying to track ref counts.
         */
        err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
                                  init_hca->mc_base,
                                  mlx4_get_mgm_entry_size(dev),
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
                goto err_unmap_srq;
        }

        return 0;

err_unmap_srq:
        mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
        mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
        mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
        mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
        mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);

        return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

        mlx4_UNMAP_ICM_AUX(dev);
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        down(&priv->cmd.slave_sem);
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
                mlx4_warn(dev, "Failed to close slave function.\n");
        up(&priv->cmd.slave_sem);
}
static int map_bf_area(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        resource_size_t bf_start;
        resource_size_t bf_len;
        int err = 0;

        if (!dev->caps.bf_reg_size)
                return -ENXIO;

        bf_start = pci_resource_start(dev->pdev, 2) +
                        (dev->caps.num_uars << PAGE_SHIFT);
        bf_len = pci_resource_len(dev->pdev, 2) -
                        (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
                err = -ENOMEM;

        return err;
}
static void unmap_bf_area(struct mlx4_dev *dev)
{
        if (mlx4_priv(dev)->bf_mapping)
                io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
        unmap_bf_area(dev);
        if (mlx4_is_slave(dev))
                mlx4_slave_exit(dev);
        else {
                mlx4_CLOSE_HCA(dev, 0);
                mlx4_free_icms(dev);
                mlx4_UNMAP_FA(dev);
                mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
        }
}
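/*
 * Bring up the command channel of a slave (VF) function: reset the
 * channel (retrying while an FLR is in progress), check that the
 * slave's command-interface revision matches the master's, and then
 * post the 64-bit VHCR DMA address piecewise via the
 * MLX4_COMM_CMD_VHCR0..VHCR_EN handshake.
 */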
static int mlx4_init_slave(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 dma = (u64) priv->mfunc.vhcr_dma;
        int num_of_reset_retries = NUM_OF_RESET_RETRIES;
        int ret_from_reset = 0;
        u32 slave_read;
        u32 cmd_channel_ver;

        down(&priv->cmd.slave_sem);
        priv->cmd.max_cmds = 1;
        mlx4_warn(dev, "Sending reset\n");
        ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
                                       MLX4_COMM_TIME);
        /* if we are in the middle of flr the slave will try
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
                if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
                        msleep(SLEEP_TIME_IN_RESET);
                        while (ret_from_reset && num_of_reset_retries) {
                                mlx4_warn(dev, "slave is currently in the "
                                          "middle of FLR. retrying..."
                                          "(try num:%d)\n",
                                          (NUM_OF_RESET_RETRIES -
                                           num_of_reset_retries + 1));
                                msleep(SLEEP_TIME_IN_RESET);
                                ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
                                                               0, MLX4_COMM_TIME);
                                num_of_reset_retries = num_of_reset_retries - 1;
                        }
                } else
                        goto err;
        }

        /* check the driver version - the slave I/F revision
         * must match the master's */
        slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
        cmd_channel_ver = mlx4_comm_get_version();

        if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
            MLX4_COMM_GET_IF_REV(slave_read)) {
                mlx4_err(dev, "slave driver version is not supported"
                         " by the master\n");
                goto err;
        }

        mlx4_warn(dev, "Sending vhcr0\n");
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
                          MLX4_COMM_TIME))
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
                          MLX4_COMM_TIME))
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
                          MLX4_COMM_TIME))
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
                goto err;
        up(&priv->cmd.slave_sem);
        return 0;

err:
        mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
        up(&priv->cmd.slave_sem);
        return -EIO;
}
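/*
 * One-time HCA initialization.  A PF loads and starts the firmware,
 * builds a resource profile, maps ICM and runs INIT_HCA; a VF instead
 * attaches to the master through the comm channel and queries its
 * quotas.  Both paths finish by mapping the blue flame area (best
 * effort) and reading adapter identification via QUERY_ADAPTER.
 */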
static int mlx4_init_hca(struct mlx4_dev *dev)
{
        struct mlx4_priv          *priv = mlx4_priv(dev);
        struct mlx4_adapter        adapter;
        struct mlx4_dev_cap        dev_cap;
        struct mlx4_mod_stat_cfg   mlx4_cfg;
        struct mlx4_profile        profile;
        struct mlx4_init_hca_param init_hca;
        u64 icm_size;
        int err;

        if (!mlx4_is_slave(dev)) {
                err = mlx4_QUERY_FW(dev);
                if (err) {
                        if (err == -EACCES)
                                mlx4_info(dev, "non-primary physical function, skipping.\n");
                        else
                                mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
                        return err;
                }

                err = mlx4_load_fw(dev);
                if (err) {
                        mlx4_err(dev, "Failed to start FW, aborting.\n");
                        return err;
                }

                mlx4_cfg.log_pg_sz_m = 1;
                mlx4_cfg.log_pg_sz = 0;
                err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
                if (err)
                        mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
                        mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                        goto err_stop_fw;
                }

                priv->fs_hash_mode = MLX4_FS_L2_HASH;

                switch (priv->fs_hash_mode) {
                case MLX4_FS_L2_HASH:
                        init_hca.fs_hash_enable_bits = 0;
                        break;

                case MLX4_FS_L2_L3_L4_HASH:
                        /* Enable flow steering with
                         * udp unicast and tcp unicast
                         */
                        init_hca.fs_hash_enable_bits =
                                MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
                        break;
                }

                profile = default_profile;
                if (dev->caps.steering_mode ==
                    MLX4_STEERING_MODE_DEVICE_MANAGED)
                        profile.num_mcg = MLX4_FS_NUM_MCG;

                icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
                                             &init_hca);
                if ((long long) icm_size < 0) {
                        err = icm_size;
                        goto err_stop_fw;
                }

                dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

                init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
                init_hca.uar_page_sz = PAGE_SHIFT - 12;

                err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
                if (err)
                        goto err_stop_fw;

                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
                        mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
                        goto err_free_icm;
                }
        } else {
                err = mlx4_init_slave(dev);
                if (err) {
                        mlx4_err(dev, "Failed to initialize slave\n");
                        return err;
                }

                err = mlx4_slave_cap(dev);
                if (err) {
                        mlx4_err(dev, "Failed to obtain slave caps\n");
                        goto err_close;
                }
        }

        if (map_bf_area(dev))
                mlx4_dbg(dev, "Failed to map blue flame area\n");

        /* Only the master sets the ports; all the others get it from it. */
        if (!mlx4_is_slave(dev))
                mlx4_set_port_mask(dev);

        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
                goto unmap_bf;
        }

        priv->eq_table.inta_pin = adapter.inta_pin;
        memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

        return 0;

unmap_bf:
        unmap_bf_area(dev);

err_close:
        mlx4_close_hca(dev);

err_free_icm:
        if (!mlx4_is_slave(dev))
                mlx4_free_icms(dev);

err_stop_fw:
        if (!mlx4_is_slave(dev)) {
                mlx4_UNMAP_FA(dev);
                mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        }
        return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nent;

        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
                return -ENOENT;

        nent = dev->caps.max_counters;
        return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
                return -ENOENT;

        *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
        if (*idx == -1)
                return -ENOMEM;

        return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
                                   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (!err)
                        *idx = get_param_l(&out_param);

                return err;
        }
        return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
        mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
        u64 in_param;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, idx);
                mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
                         MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                         MLX4_CMD_WRAPPED);
                return;
        }
        __mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
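/*
 * Build the driver-side resource tables (UAR, PD, XRCD, MR, EQ, CQ,
 * SRQ, QP, MCG, counters) on top of the mapped ICM, switch the command
 * interface to event-driven mode, and verify interrupt delivery with a
 * NOP command before the ports are configured.
 */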
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int port;
        __be32 ib_port_default_caps;

        err = mlx4_init_uar_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "user access region table, aborting.\n");
                return err;
        }

        err = mlx4_uar_alloc(dev, &priv->driver_uar);
        if (err) {
                mlx4_err(dev, "Failed to allocate driver access region, "
                         "aborting.\n");
                goto err_uar_table_free;
        }

        priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!priv->kar) {
                mlx4_err(dev, "Couldn't map kernel access region, "
                         "aborting.\n");
                err = -ENOMEM;
                goto err_uar_free;
        }

        err = mlx4_init_pd_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "protection domain table, aborting.\n");
                goto err_kar_unmap;
        }

        err = mlx4_init_xrcd_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "reliable connection domain table, aborting.\n");
                goto err_pd_table_free;
        }

        err = mlx4_init_mr_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "memory region table, aborting.\n");
                goto err_xrcd_table_free;
        }

        err = mlx4_init_eq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "event queue table, aborting.\n");
                goto err_mr_table_free;
        }

        err = mlx4_cmd_use_events(dev);
        if (err) {
                mlx4_err(dev, "Failed to switch to event-driven "
                         "firmware commands, aborting.\n");
                goto err_eq_table_free;
        }

        err = mlx4_NOP(dev);
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
                        mlx4_warn(dev, "NOP command failed to generate MSI-X "
                                  "interrupt (IRQ %d).\n",
                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_warn(dev, "Trying again without MSI-X.\n");
                } else {
                        mlx4_err(dev, "NOP command failed to generate interrupt "
                                 "(IRQ %d), aborting.\n",
                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }

                goto err_cmd_poll;
        }

        mlx4_dbg(dev, "NOP command IRQ test passed\n");

        err = mlx4_init_cq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "completion queue table, aborting.\n");
                goto err_cmd_poll;
        }

        err = mlx4_init_srq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "shared receive queue table, aborting.\n");
                goto err_cq_table_free;
        }

        err = mlx4_init_qp_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "queue pair table, aborting.\n");
                goto err_srq_table_free;
        }

        if (!mlx4_is_slave(dev)) {
                err = mlx4_init_mcg_table(dev);
                if (err) {
                        mlx4_err(dev, "Failed to initialize "
                                 "multicast group table, aborting.\n");
                        goto err_qp_table_free;
                }
        }

        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
                goto err_mcg_table_free;
        }

        if (!mlx4_is_slave(dev)) {
                for (port = 1; port <= dev->caps.num_ports; port++) {
                        ib_port_default_caps = 0;
                        err = mlx4_get_port_ib_caps(dev, port,
                                                    &ib_port_default_caps);
                        if (err)
                                mlx4_warn(dev, "failed to get port %d default "
                                          "ib capabilities (%d). Continuing "
                                          "with caps = 0\n", port, err);
                        dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

                        if (mlx4_is_mfunc(dev))
                                dev->caps.port_ib_mtu[port] = IB_MTU_2048;
                        else
                                dev->caps.port_ib_mtu[port] = IB_MTU_4096;

                        err = mlx4_SET_PORT(dev, port);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, aborting\n",
                                         port);
                                goto err_counters_table_free;
                        }
                }
        }

        return 0;

err_counters_table_free:
        mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
        mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
        mlx4_cleanup_qp_table(dev);

err_srq_table_free:
        mlx4_cleanup_srq_table(dev);

err_cq_table_free:
        mlx4_cleanup_cq_table(dev);

err_cmd_poll:
        mlx4_cmd_use_polling(dev);

err_eq_table_free:
        mlx4_cleanup_eq_table(dev);

err_mr_table_free:
        mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
        mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
        mlx4_cleanup_pd_table(dev);

err_kar_unmap:
        iounmap(priv->kar);

err_uar_free:
        mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
        mlx4_cleanup_uar_table(dev);
        return err;
}
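/*
 * Try to enable MSI-X.  The requested vector count scales with the
 * number of ports and the default RSS queue count, while multi-function
 * devices ask for only two vectors (data-path completions plus async
 * events/command completions).  If MSI-X cannot be enabled, fall back
 * to sharing the legacy INTx interrupt between both EQs.
 */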
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct msix_entry *entries;
        int nreq = min_t(int, dev->caps.num_ports *
                         min_t(int, netif_get_num_default_rss_queues() + 1,
                               MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
        int err;
        int i;

        if (msi_x) {
                /* In multifunction mode each function gets 2 MSI-X vectors,
                 * one for data path completions and the other for async events
                 * or command completions */
                if (mlx4_is_mfunc(dev)) {
                        nreq = 2;
                } else {
                        nreq = min_t(int, dev->caps.num_eqs -
                                     dev->caps.reserved_eqs, nreq);
                }

                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                        goto no_msi;

                for (i = 0; i < nreq; ++i)
                        entries[i].entry = i;

        retry:
                err = pci_enable_msix(dev->pdev, entries, nreq);
                if (err) {
                        /* Try again if at least 2 vectors are available */
                        if (err > 1) {
                                mlx4_info(dev, "Requested %d vectors, "
                                          "but only %d MSI-X vectors available, "
                                          "trying again\n", nreq, err);
                                nreq = err;
                                goto retry;
                        }
                        kfree(entries);
                        goto no_msi;
                }

                if (nreq <
                    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
                        /* Working in legacy mode, all EQs shared */
                        dev->caps.comp_pool        = 0;
                        dev->caps.num_comp_vectors = nreq - 1;
                } else {
                        dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
                        dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
                }
                for (i = 0; i < nreq; ++i)
                        priv->eq_table.eq[i].irq = entries[i].vector;

                dev->flags |= MLX4_FLAG_MSI_X;

                kfree(entries);
                return;
        }

no_msi:
        dev->caps.num_comp_vectors = 1;
        dev->caps.comp_pool        = 0;

        for (i = 0; i < 2; ++i)
                priv->eq_table.eq[i].irq = dev->pdev->irq;
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        int err = 0;

        info->dev = dev;
        info->port = port;
        if (!mlx4_is_slave(dev)) {
                INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
                mlx4_init_mac_table(dev, &info->mac_table);
                mlx4_init_vlan_table(dev, &info->vlan_table);
                info->base_qpn =
                        dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
                        (port - 1) * (1 << log_num_mac);
        }

        sprintf(info->dev_name, "mlx4_port%d", port);
        info->port_attr.attr.name = info->dev_name;
        if (mlx4_is_mfunc(dev))
                info->port_attr.attr.mode = S_IRUGO;
        else {
                info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
                info->port_attr.store     = set_port_type;
        }
        info->port_attr.show      = show_port_type;
        sysfs_attr_init(&info->port_attr.attr);

        err = device_create_file(&dev->pdev->dev, &info->port_attr);
        if (err) {
                mlx4_err(dev, "Failed to create file for port %d\n", port);
                info->port = -1;
        }

        sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
        info->port_mtu_attr.attr.name = info->dev_mtu_name;
        if (mlx4_is_mfunc(dev))
                info->port_mtu_attr.attr.mode = S_IRUGO;
        else {
                info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
                info->port_mtu_attr.store     = set_port_ib_mtu;
        }
        info->port_mtu_attr.show      = show_port_ib_mtu;
        sysfs_attr_init(&info->port_mtu_attr.attr);

        err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
        if (err) {
                mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
                device_remove_file(&info->dev->pdev->dev, &info->port_attr);
                info->port = -1;
        }

        return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
        if (info->port < 0)
                return;

        device_remove_file(&info->dev->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int num_entries = dev->caps.num_ports;
        int i, j;

        priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
        if (!priv->steer)
                return -ENOMEM;

        for (i = 0; i < num_entries; i++)
                for (j = 0; j < MLX4_NUM_STEERS; j++) {
                        INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
                        INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
                }
        return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_steer_index *entry, *tmp_entry;
        struct mlx4_promisc_qp *pqp, *tmp_pqp;
        int num_entries = dev->caps.num_ports;
        int i, j;

        for (i = 0; i < num_entries; i++) {
                for (j = 0; j < MLX4_NUM_STEERS; j++) {
                        list_for_each_entry_safe(pqp, tmp_pqp,
                                                 &priv->steer[i].promisc_qps[j],
                                                 list) {
                                list_del(&pqp->list);
                                kfree(pqp);
                        }
                        list_for_each_entry_safe(entry, tmp_entry,
                                                 &priv->steer[i].steer_entries[j],
                                                 list) {
                                list_del(&entry->list);
                                list_for_each_entry_safe(pqp, tmp_pqp,
                                                         &entry->duplicates,
                                                         list) {
                                        list_del(&pqp->list);
                                        kfree(pqp);
                                }
                                kfree(entry);
                        }
                }
        }
        kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
        return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
#define MLX4_OWNER_BASE 0x8069c
#define MLX4_OWNER_SIZE 4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
        void __iomem *owner;
        u32 ret;

        if (pci_channel_offline(dev->pdev))
                return -EIO;

        owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
                        MLX4_OWNER_SIZE);
        if (!owner) {
                mlx4_err(dev, "Failed to obtain ownership bit\n");
                return -ENOMEM;
        }

        ret = readl(owner);
        iounmap(owner);
        return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
        void __iomem *owner;

        if (pci_channel_offline(dev->pdev))
                return;

        owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
                        MLX4_OWNER_SIZE);
        if (!owner) {
                mlx4_err(dev, "Failed to obtain ownership bit\n");
                return;
        }
        writel(0, owner);
        msleep(1000);
        iounmap(owner);
}
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mlx4_priv *priv;
        struct mlx4_dev *dev;
        int err;
        int port;

        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, "
                        "aborting.\n");
                return err;
        }
        if (num_vfs > MLX4_MAX_NUM_VF) {
                printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
                       num_vfs, MLX4_MAX_NUM_VF);
                return -EINVAL;
        }
        /*
         * Check for BARs.
         */
        if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
            !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing DCS, aborting."
                        "(id == 0X%p, id->driver_data: 0x%lx,"
                        " pci_resource_flags(pdev, 0):0x%lx)\n", id,
                        id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing UAR, aborting.\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                        goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
                         "consistent PCI DMA mask.\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
                                "aborting.\n");
                        goto err_release_regions;
                }
        }

        /* Allow large DMA segments, up to the firmware limit of 1 GB */
        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

        priv = kzalloc(sizeof *priv, GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "Device struct alloc failed, "
                        "aborting.\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        dev       = &priv->dev;
        dev->pdev = pdev;
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);

        mutex_init(&priv->port_mutex);

        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);

        INIT_LIST_HEAD(&priv->bf_list);
        mutex_init(&priv->bf_mutex);

        dev->rev_id = pdev->revision;
        /* Detect if this device is a virtual function */
        if (id && id->driver_data & MLX4_VF) {
                /* When acting as pf, we normally skip vfs unless explicitly
                 * requested to probe them. */
                if (num_vfs && extended_func_num(pdev) > probe_vf) {
                        mlx4_warn(dev, "Skipping virtual function:%d\n",
                                  extended_func_num(pdev));
                        err = -ENODEV;
                        goto err_free_dev;
                }
                mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
                dev->flags |= MLX4_FLAG_SLAVE;
        } else {
                /* We reset the device and enable SRIOV only for physical
                 * devices.  Try to claim ownership on the device;
                 * if already taken, skip -- do not allow multiple PFs */
                err = mlx4_get_ownership(dev);
                if (err) {
                        if (err < 0)
                                goto err_free_dev;
                        else {
                                mlx4_warn(dev, "Multiple PFs not yet supported."
                                          " Skipping PF.\n");
                                err = -EINVAL;
                                goto err_free_dev;
                        }
                }

                if (num_vfs) {
                        mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
                        err = pci_enable_sriov(pdev, num_vfs);
                        if (err) {
                                mlx4_err(dev, "Failed to enable sriov,"
                                         "continuing without sriov enabled"
                                         " (err = %d).\n", err);
                                err = 0;
                        } else {
                                mlx4_warn(dev, "Running in master mode\n");
                                dev->flags |= MLX4_FLAG_SRIOV |
                                              MLX4_FLAG_MASTER;
                                dev->num_vfs = num_vfs;
                        }
                }

                /*
                 * Now reset the HCA before we touch the PCI capabilities or
                 * attempt a firmware command, since a boot ROM may have left
                 * the HCA in an undefined state.
                 */
                err = mlx4_reset(dev);
                if (err) {
                        mlx4_err(dev, "Failed to reset HCA, aborting.\n");
                        goto err_rel_own;
                }
        }

slave_start:
        if (mlx4_cmd_init(dev)) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
                goto err_sriov;
        }

        /* In slave functions, the communication channel must be initialized
         * before posting commands. Also, init num_slaves before calling
         * mlx4_init_hca */
        if (mlx4_is_mfunc(dev)) {
                if (mlx4_is_master(dev))
                        dev->num_slaves = MLX4_MAX_NUM_SLAVES;
                else {
                        dev->num_slaves = 0;
                        if (mlx4_multi_func_init(dev)) {
                                mlx4_err(dev, "Failed to init slave mfunc"
                                         " interface, aborting.\n");
                                goto err_cmd;
                        }
                }
        }

        err = mlx4_init_hca(dev);
        if (err) {
                if (err == -EACCES) {
                        /* Not primary Physical function
                         * Running in slave mode */
                        mlx4_cmd_cleanup(dev);
                        dev->flags |= MLX4_FLAG_SLAVE;
                        dev->flags &= ~MLX4_FLAG_MASTER;
                        goto slave_start;
                } else
                        goto err_mfunc;
        }

        /* In master functions, the communication channel must be initialized
         * after obtaining its address from fw */
        if (mlx4_is_master(dev)) {
                if (mlx4_multi_func_init(dev)) {
                        mlx4_err(dev, "Failed to init master mfunc "
                                 "interface, aborting.\n");
                        goto err_close;
                }
        }

        err = mlx4_alloc_eq_table(dev);
        if (err)
                goto err_master_mfunc;

        priv->msix_ctl.pool_bm = 0;
        mutex_init(&priv->msix_ctl.pool_lock);

        mlx4_enable_msi_x(dev);
        if ((mlx4_is_mfunc(dev)) &&
            !(dev->flags & MLX4_FLAG_MSI_X)) {
                mlx4_err(dev, "INTx is not supported in multi-function mode."
                         " aborting.\n");
                goto err_free_eq;
        }

        if (!mlx4_is_slave(dev)) {
                err = mlx4_init_steering(dev);
                if (err)
                        goto err_free_eq;
        }

        err = mlx4_setup_hca(dev);
        if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                dev->caps.num_comp_vectors = 1;
                dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }

        if (err)
                goto err_steer;

        for (port = 1; port <= dev->caps.num_ports; port++) {
                err = mlx4_init_port_info(dev, port);
                if (err)
                        goto err_port;
        }

        err = mlx4_register_device(dev);
        if (err)
                goto err_port;

        mlx4_sense_init(dev);
        mlx4_start_sense(dev);

        pci_set_drvdata(pdev, dev);

        return 0;

err_port:
        for (--port; port >= 1; --port)
                mlx4_cleanup_port_info(&priv->port[port]);

        mlx4_cleanup_counters_table(dev);
        mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_qp_table(dev);
        mlx4_cleanup_srq_table(dev);
        mlx4_cleanup_cq_table(dev);
        mlx4_cmd_use_polling(dev);
        mlx4_cleanup_eq_table(dev);
        mlx4_cleanup_mr_table(dev);
        mlx4_cleanup_xrcd_table(dev);
        mlx4_cleanup_pd_table(dev);
        mlx4_cleanup_uar_table(dev);

err_steer:
        if (!mlx4_is_slave(dev))
                mlx4_clear_steering(dev);

err_free_eq:
        mlx4_free_eq_table(dev);

err_master_mfunc:
        if (mlx4_is_master(dev))
                mlx4_multi_func_cleanup(dev);

err_close:
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);

        mlx4_close_hca(dev);

err_mfunc:
        if (mlx4_is_slave(dev))
                mlx4_multi_func_cleanup(dev);

err_cmd:
        mlx4_cmd_cleanup(dev);

err_sriov:
        if (dev->flags & MLX4_FLAG_SRIOV)
                pci_disable_sriov(pdev);

err_rel_own:
        if (!mlx4_is_slave(dev))
                mlx4_free_ownership(dev);

err_free_dev:
        kfree(priv);

err_release_regions:
        pci_release_regions(pdev);

err_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
static int __devinit mlx4_init_one(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        printk_once(KERN_INFO "%s", mlx4_version);

        return __mlx4_init_one(pdev, id);
}
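/*
 * Teardown mirror of __mlx4_init_one(): unregister the device, close
 * all ports, free the resource tables in reverse dependency order,
 * disable MSI-X and SR-IOV, and release the PCI resources.  A master
 * additionally frees the resources tracked on behalf of its slaves.
 */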
static void mlx4_remove_one(struct pci_dev *pdev)
{
        struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
        struct mlx4_priv *priv = mlx4_priv(dev);
        int p;

        if (dev) {
                /* in SRIOV it is not allowed to unload the pf's
                 * driver while there are alive vf's */
                if (mlx4_is_master(dev)) {
                        if (mlx4_how_many_lives_vf(dev))
                                printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
                }
                mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);

                for (p = 1; p <= dev->caps.num_ports; p++) {
                        mlx4_cleanup_port_info(&priv->port[p]);
                        mlx4_CLOSE_PORT(dev, p);
                }

                if (mlx4_is_master(dev))
                        mlx4_free_resource_tracker(dev,
                                                   RES_TR_FREE_SLAVES_ONLY);

                mlx4_cleanup_counters_table(dev);
                mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_qp_table(dev);
                mlx4_cleanup_srq_table(dev);
                mlx4_cleanup_cq_table(dev);
                mlx4_cmd_use_polling(dev);
                mlx4_cleanup_eq_table(dev);
                mlx4_cleanup_mr_table(dev);
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);

                if (mlx4_is_master(dev))
                        mlx4_free_resource_tracker(dev,
                                                   RES_TR_FREE_STRUCTS_ONLY);

                iounmap(priv->kar);
                mlx4_uar_free(dev, &priv->driver_uar);
                mlx4_cleanup_uar_table(dev);
                if (!mlx4_is_slave(dev))
                        mlx4_clear_steering(dev);
                mlx4_free_eq_table(dev);
                if (mlx4_is_master(dev))
                        mlx4_multi_func_cleanup(dev);
                mlx4_close_hca(dev);
                if (mlx4_is_slave(dev))
                        mlx4_multi_func_cleanup(dev);
                mlx4_cmd_cleanup(dev);

                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);
                if (dev->flags & MLX4_FLAG_SRIOV) {
                        mlx4_warn(dev, "Disabling sriov\n");
                        pci_disable_sriov(pdev);
                }

                if (!mlx4_is_slave(dev))
                        mlx4_free_ownership(dev);
                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

int mlx4_restart_one(struct pci_dev *pdev)
{
        mlx4_remove_one(pdev);
        return __mlx4_init_one(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
        /* MT25408 "Hermon" SDR */
        { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
        /* MT25408 "Hermon" DDR */
        { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
        /* MT25408 "Hermon" QDR */
        { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
        /* MT25408 "Hermon" DDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
        /* MT25408 "Hermon" QDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
        /* MT25408 "Hermon" EN 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
        /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
        /* MT25458 ConnectX EN 10GBASE-T 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
        /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
        /* MT26468 ConnectX EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
        /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
        { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
        /* MT26478 ConnectX2 40GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
        /* MT25400 Family [ConnectX-2 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
        /* MT27500 Family [ConnectX-3] */
        { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
        /* MT27500 Family [ConnectX-3 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
        { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
        { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
        { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
        { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
        { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
        { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
        { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
        { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
        { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
        { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
        { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
        { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        mlx4_remove_one(pdev);

        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
        int ret = __mlx4_init_one(pdev, NULL);

        return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = mlx4_pci_table,
        .probe          = mlx4_init_one,
        .remove         = __devexit_p(mlx4_remove_one),
        .err_handler    = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
        if ((log_num_mac < 0) || (log_num_mac > 7)) {
                pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
                return -1;
        }

        if (log_num_vlan != 0)
                pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
                           MLX4_LOG_NUM_VLANS);

        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
                pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
                return -1;
        }

        /* Check if module param for ports type has legal combination */
        if (port_type_array[0] == false && port_type_array[1] == true) {
                printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
                port_type_array[0] = true;
        }

        return 0;
}
static int __init mlx4_init(void)
{
        int ret;

        if (mlx4_verify_params())
                return -EINVAL;

        mlx4_catas_init();

        mlx4_wq = create_singlethread_workqueue("mlx4");
        if (!mlx4_wq)
                return -ENOMEM;

        ret = pci_register_driver(&mlx4_driver);
        return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
        pci_unregister_driver(&mlx4_driver);
        destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);