/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;
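/* Prepare an SMP MAD as a SubnGet() request: base/class version 1,
 * LID-routed subnet-management class, method Get.
 */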
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
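/* Device-managed flow steering (DMFS) is usable only when any IB ports
 * support DMFS for IPoIB, at least one Ethernet port has flow steering
 * enabled, and the device is not multi-function with IB ports.
 */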
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(eth_num_ports &&
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}
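/* Return the netdev bound to an Ethernet port; under bonding, resolve
 * through the bond master to the currently active slave.
 */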
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}
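/* Push the per-port GID table to firmware with SET_PORT using the
 * RoCE v1-only layout; in bonded mode the same table is mirrored to
 * port 2.
 */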
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
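/* RoCE v1/v2 capable firmware uses an extended table entry that also
 * carries a GID type and version per entry; v2 UDP-encap GIDs are
 * marked version 2, and v4-mapped addresses keep only the low dword.
 */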
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
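/* Cache a RoCE GID under the iboe lock, taking a reference if the same
 * GID/type pair is already present; the firmware table is pushed only
 * after the lock is dropped.
 */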
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type))  {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}
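/* Drop a reference on a cached RoCE GID; on the last put, clear the
 * entry and push the updated table to firmware.
 */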
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}
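/* Translate an index in the rdma GID cache into the index the hardware
 * actually uses for that GID/type pair.
 */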
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table   *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}
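/* Query node info via MAD and translate mlx4 capabilities into
 * ib_device_attr; the extended uverbs response can additionally carry
 * the HCA core clock offset for userspace timestamp reads.
 */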
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
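/* For Ethernet ports, derive IB-like port attributes from QUERY_PORT
 * and from the state of the associated netdev (the bond master when
 * the ports are bonded).
 */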
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
				eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
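/* For IB ports, build the GID from the port GID prefix plus the GUID
 * table entry; slaves without network view report the null GUID for
 * any index > 0.
 */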
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page that there will be no
	 * "splitting" operations on.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.  The exiting case is handled explicitly as part
	 * of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}
static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_write(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		context->hw_bar_info[i].vma->vm_flags &=
			~(VM_SHARED | VM_MAYSHARE);
		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}
static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops =  &mlx4_ib_vm_ops;
}
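/* Map one of the per-context hardware BARs (doorbell, blue-flame, or
 * HCA clock page) into userspace; each may be mapped only once per
 * context so that disassociate can zap them reliably.
 */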
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	return &pd->ibpd;
}
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}
static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);
	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
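/* Translate one ib_flow_spec into the mlx4 hardware rule layout;
 * returns the number of bytes consumed in the rule buffer, or a
 * negative errno if the spec carries fields the device cannot match on.
 */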
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
	    flow_attr->num_of_specs == 1) {
		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
		enum ib_flow_spec_type header_spec =
			((union ib_flow_spec *)(flow_attr + 1))->type;

		if (header_spec == IB_FLOW_SPEC_ETH)
			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if all is zero than MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* Above xor was only on MC bit, non empty mask is valid
			 * only if this bit is set and rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}
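/* Attach up to two hardware rules per flow (e.g. MC + UC sniffer, or
 * the mirror rule on port 2 when bonded) and record their reg_ids for
 * later detach.
 */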
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If dont trap flag (continue match) is set, under specific
		 * condition traffic be replicated to given qp,
		 * without stealing it
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;

		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
struct diag_counter {
	const char	*name;
	u32		offset;
};

#define DIAG_COUNTER(_name, _offset)			\
	{ .name = #_name, .offset = _offset }

static const struct diag_counter diag_basic[] = {
	DIAG_COUNTER(rq_num_lle, 0x00),
	DIAG_COUNTER(sq_num_lle, 0x04),
	DIAG_COUNTER(rq_num_lqpoe, 0x08),
	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
	DIAG_COUNTER(rq_num_lpe, 0x18),
	DIAG_COUNTER(sq_num_lpe, 0x1C),
	DIAG_COUNTER(rq_num_wrfe, 0x20),
	DIAG_COUNTER(sq_num_wrfe, 0x24),
	DIAG_COUNTER(sq_num_mwbe, 0x2C),
	DIAG_COUNTER(sq_num_bre, 0x34),
	DIAG_COUNTER(sq_num_rire, 0x44),
	DIAG_COUNTER(rq_num_rire, 0x48),
	DIAG_COUNTER(sq_num_rae, 0x4C),
	DIAG_COUNTER(rq_num_rae, 0x50),
	DIAG_COUNTER(sq_num_roe, 0x54),
	DIAG_COUNTER(sq_num_tree, 0x5C),
	DIAG_COUNTER(sq_num_rree, 0x64),
	DIAG_COUNTER(rq_num_rnr, 0x68),
	DIAG_COUNTER(sq_num_rnr, 0x6C),
	DIAG_COUNTER(rq_num_oos, 0x100),
	DIAG_COUNTER(sq_num_oos, 0x104),
};

static const struct diag_counter diag_ext[] = {
	DIAG_COUNTER(rq_num_dup, 0x130),
	DIAG_COUNTER(sq_num_to, 0x134),
};

static const struct diag_counter diag_device_only[] = {
	DIAG_COUNTER(num_cqovf, 0x1A0),
	DIAG_COUNTER(rq_num_udsdprd, 0x118),
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;

	if (!diag[!!port_num].name)
		return NULL;

	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
					  diag[!!port_num].num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
	u32 hw_value[ARRAY_SIZE(diag_device_only) +
		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
	int ret;
	int i;

	ret = mlx4_query_diag_counters(dev->dev,
				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
				       diag[!!port].offset, hw_value,
				       diag[!!port].num_counters, port);

	if (ret)
		return ret;

	for (i = 0; i < diag[!!port].num_counters; i++)
		stats->value[i] = hw_value[i];

	return diag[!!port].num_counters;
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
					 const char ***name,
					 u32 **offset,
					 u32 *num,
					 bool port)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(diag_basic);

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
		num_counters += ARRAY_SIZE(diag_ext);

	if (!port)
		num_counters += ARRAY_SIZE(diag_device_only);

	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
	if (!*name)
		return -ENOMEM;

	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
	if (!*offset)
		goto err_name;

	*num = num_counters;

	return 0;

err_name:
	kfree(*name);
	return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
				       const char **name,
				       u32 *offset,
				       bool port)
{
	int i;
	int j;

	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
		name[i] = diag_basic[i].name;
		offset[i] = diag_basic[i].offset;
	}

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
			name[j] = diag_ext[i].name;
			offset[j] = diag_ext[i].offset;
		}
	}

	if (!port) {
		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
			name[j] = diag_device_only[i].name;
			offset[j] = diag_device_only[i].offset;
		}
	}
}
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
	int i;
	int ret;
	bool per_port = !!(ibdev->dev->caps.flags2 &
			   MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);

	if (mlx4_is_slave(ibdev->dev))
		return 0;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		/* i == 1 means we are building port counters */
		if (i && !per_port)
			continue;

		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
						    &diag[i].offset,
						    &diag[i].num_counters, i);
		if (ret)
			goto err_alloc;

		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
					   diag[i].offset, i);
	}

	ibdev->ib_dev.get_hw_stats	= mlx4_ib_get_hw_stats;
	ibdev->ib_dev.alloc_hw_stats	= mlx4_ib_alloc_hw_stats;

	return 0;

err_alloc:
	if (i) {
		kfree(diag[i - 1].name);
		kfree(diag[i - 1].offset);
	}

	return ret;
}
static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
{
	int i;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		kfree(ibdev->diag_counters[i].offset);
		kfree(ibdev->diag_counters[i].name);
	}
}
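/*
 * When the source MAC of an Ethernet port changes, the proxy QP1 of that
 * port has to follow it: register the new MAC with the device, point the
 * QP at the new SMAC index, and release whichever MAC ends up unused.
 */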
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need for update QP1 and mac registration in non-SRIOV */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	ASSERT_RTNL();

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

	}
	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
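/*
 * On an SR-IOV master, every slave sees a virtualized pkey table. The
 * master gets the identity mapping; other slaves are mapped to the last
 * physical entry except for index 0, which stays the default pkey.
 */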
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				/* slot 0 holds the default full-membership pkey */
				ibdev->pkeys.phys_pkey_cache[port - 1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
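/*
 * Completion vector setup: EQs are requested per port and handed out to
 * ULPs as comp vectors; vectors that cannot be assigned are marked -1,
 * and only the number actually obtained is advertised.
 */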
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;

	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}

	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	}

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
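/*
 * dev->caps.fw_ver packs the firmware version as major:minor:subminor
 * in the upper 32, middle 16 and lower 16 bits respectively.
 */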
static void get_fw_ver_str(struct ib_device *device, char *str,
			   size_t str_len)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev);
	snprintf(str, str_len, "%d.%d.%d",
		 (int) (dev->dev->caps.fw_ver >> 32),
		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dev->caps.fw_ver & 0xffff);
}
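/*
 * Main probe path. Roughly: allocate the IB device and its PD/UAR,
 * populate the verbs entry points and capability-dependent features
 * (FMR, memory windows, XRC, flow steering), set up counters and the
 * steerable QP range, then register with the IB core and hook up the
 * netdev notifier and SR-IOV machinery. The error labels at the bottom
 * unwind in reverse order of setup.
 */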
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}
	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;
	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;
	ibdev->ib_dev.get_netdev	= mlx4_ib_get_netdev;
	ibdev->ib_dev.add_gid		= mlx4_ib_add_gid;
	ibdev->ib_dev.del_gid		= mlx4_ib_del_gid;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_mr		= mlx4_ib_alloc_mr;
	ibdev->ib_dev.map_mr_sg		= mlx4_ib_map_mr_sg;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
	ibdev->ib_dev.get_dev_fw_str	= get_fw_ver_str;
	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}
	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;
	mlx4_init_sl2vl_tbl(ibdev);

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
	}
	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
					dev, ibdev->steer_qpn_base,
					ibdev->steer_qpn_base +
					ibdev->steer_qpn_count - 1);
			if (err)
				goto err_steer_free_bitmap;
		} else {
			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
		}
	}
	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_diag_counters;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (!iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err) {
			iboe->nb.notifier_call = NULL;
			goto err_notif;
		}
	}
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
		if (err)
			goto err_notif;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_diag_counters:
	mlx4_ib_diag_cleanup(ibdev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	mlx4_ib_free_eqs(dev, ibdev);
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
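/*
 * Steerable UC QPs are handed out from the range reserved in
 * mlx4_ib_add(), tracked by a simple bitmap region allocator.
 */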
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	mlx4_ib_diag_cleanup(ibdev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm)
		return;

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
	return;
}
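/*
 * On a catastrophic device error, walk every QP on this ibdev and
 * collect the CQs of queues that still have outstanding work, so their
 * completion handlers can be invoked one last time and consumers get to
 * observe the failure.
 */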
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head    cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		/* the bond is active as long as at least one port is */
		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
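/*
 * Refresh the cached SL-to-VL mapping for a port. Judging by the event
 * handler below, this is scheduled from the port-up path on secure-host
 * devices that lack the SL2VL change event capability.
 */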
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}
static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	}
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew)
			break;

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);