/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 9 <="
					 " log_num_mgm_entry_size <= 12."
					 " Not in use with device managed"
					 " flow steering");
#define MLX4_VF				(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};
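/*
 * Note that .num_mtt above counts MTT *segments*: each segment holds
 * 2^log_mtts_per_seg entries, so the default profile describes
 * (1 << 20) << log_mtts_per_seg individual MTT entries.
 */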
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}
	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
		dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
		/* set gid and pkey table operating lengths by default
		 * to non-sriov values */
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i]  = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]	    = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]	    = dev_cap->wavelength[i];
		dev->caps.trans_code[i]	    = dev_cap->trans_code[i];
	}
	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
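	/* For example, a reported max_cq_sz of 4096 leaves 4095 usable CQEs. */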
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
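	/*
	 * Steering mode selection below, from most to least capable:
	 * device-managed flow steering when the FS_EN capability is set,
	 * B0 steering when both VEP unicast and multicast steering flags
	 * are present, and plain A0 steering otherwise.
	 */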
	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		} else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
					  "set to use B0 steering. Falling back to A0 steering mode.\n");
		}
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
	/* Port sensing is always allowed on supported ConnectX-1 and ConnectX-2 devices */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
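	/*
	 * The Ethernet address region below reserves one QP per
	 * (MAC, VLAN, priority) tuple on every port; with the defaults above
	 * (2^7 MACs, 2^7 VLANs, no priority steering) that is 16384 QPs per port.
	 */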
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
/* Check whether there are live (active) VFs and return how many were found. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;
	if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->caps.sqp_start)
		return -EINVAL;

	if (qpn >= dev->caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->caps.sqp_start;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	int i;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the hca has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports	= func_cap.num_ports;
	dev->caps.num_qps	= func_cap.qp_quota;
	dev->caps.num_srqs	= func_cap.srq_quota;
	dev->caps.num_cqs	= func_cap.cq_quota;
	dev->caps.num_eqs	= func_cap.max_eq;
	dev->caps.reserved_eqs	= func_cap.reserved_eq;
	dev->caps.num_mpts	= func_cap.mpt_quota;
	dev->caps.num_mtts	= func_cap.mtt_quota;
	dev->caps.num_pds	= MLX4_NUM_PDS;
	dev->caps.num_mgms	= 0;
	dev->caps.num_amgms	= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			return -ENODEV;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	return 0;
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
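/*
 * IBTA encodes the MTU as a small enum (IB_MTU_256 .. IB_MTU_4096) rather
 * than a byte count, which is why the two helpers above translate back and
 * forth for the per-port mtu sysfs attribute below.
 */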
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = sscanf(buf, "%d", &mtu);
	if (err > 0)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err <= 0 || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				 "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
		  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
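	/*
	 * Worked example (assuming, say, an 8-byte segment size and a 64-byte
	 * cache line): 16 reserved entries stay 16, since
	 * ALIGN(16 * 8, 64) / 8 = 16, while 17 reserved entries round up to
	 * ALIGN(17 * 8, 64) / 8 = 192 / 8 = 24.
	 */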
	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
		   (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
		 (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. retrying..."
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		priv->fs_hash_mode = MLX4_FS_L2_HASH;

		switch (priv->fs_hash_mode) {
		case MLX4_FS_L2_HASH:
			init_hca.fs_hash_enable_bits = 0;
			break;

		case MLX4_FS_L2_L3_L4_HASH:
			/* Enable flow steering with
			 * udp unicast and tcp unicast
			 */
			init_hca.fs_hash_enable_bits =
				MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
			break;
		}

		profile = default_profile;
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the other functions get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_bf_area(dev);

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
							ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, netif_get_num_default_rss_queues() + 1,
			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
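	/*
	 * For example, on a 2-port HCA where
	 * netif_get_num_default_rss_queues() returns 8, this asks for
	 * 2 * min(9, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ vectors, capped at
	 * MAX_MSIX.
	 */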
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors,
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool	   = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool	   = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		return err;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		return err;
	}

	return err;
}
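/*
 * The two attributes created above show up as per-port files on the PCI
 * device, e.g. (hypothetical BDF) /sys/bus/pci/devices/0000:04:00.0/mlx4_port1
 * and mlx4_port1_mtu.  Writing "ib", "eth" or "auto" to the former goes
 * through set_port_type(); writing an IBTA MTU such as 2048 to the latter
 * goes through set_port_ib_mtu().
 */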
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}
static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
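/*
 * extended_func_num() simply flattens (slot, function) back into the raw
 * devfn ordinal: e.g. devfn 0x11 is slot 2, function 1, giving 2 * 8 + 1 = 17.
 * __mlx4_init_one() compares this number against the probe_vf module
 * parameter when deciding whether the PF driver should also probe a VF.
 */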
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}
static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	iounmap(owner);
}
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}

	/* Check for BARs. */
	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting."
			"(id == 0X%p, id->driver_data: 0x%lx,"
			" pci_resource_flags(pdev, 0):0x%lx)\n", id,
			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (id && id->driver_data & MLX4_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable sriov, "
					 "continuing without sriov enabled"
					 " (err = %d).\n", err);
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_rel_own;
		}
	}

slave_start:
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc "
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode."
			 " aborting.\n");
		err = -ENOSYS;
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;
	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV)
		pci_disable_sriov(pdev);

err_rel_own:
	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* In SRIOV mode it is not allowed to unload the PF's
		 * driver while there are alive VFs */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_SLAVES_ONLY);

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_STRUCTS_ONLY);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (dev->flags & MLX4_FLAG_SRIOV) {
			mlx4_warn(dev, "Disabling sriov\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_remove_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	int ret = __mlx4_init_one(pdev, NULL);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one),
	.err_handler    = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	return 0;
}
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);