/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
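/*
 * Tunables for the CM exchanges below: CMA_CM_RESPONSE_TIMEOUT and
 * CMA_MAX_CM_RETRIES bound how long we wait for the remote CM before
 * giving up, CMA_CM_MRA_SETTING asks the peer to stretch its timeout
 * while a request sits in one of our handlers, and
 * CMA_IBOE_PACKET_LIFETIME approximates packet lifetime on RoCE
 * fabrics, where no subnet administrator can supply one.
 */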
static const char * const cma_events[] = {
        [RDMA_CM_EVENT_ADDR_RESOLVED]    = "address resolved",
        [RDMA_CM_EVENT_ADDR_ERROR]       = "address error",
        [RDMA_CM_EVENT_ROUTE_RESOLVED]   = "route resolved ",
        [RDMA_CM_EVENT_ROUTE_ERROR]      = "route error",
        [RDMA_CM_EVENT_CONNECT_REQUEST]  = "connect request",
        [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
        [RDMA_CM_EVENT_CONNECT_ERROR]    = "connect error",
        [RDMA_CM_EVENT_UNREACHABLE]      = "unreachable",
        [RDMA_CM_EVENT_REJECTED]         = "rejected",
        [RDMA_CM_EVENT_ESTABLISHED]      = "established",
        [RDMA_CM_EVENT_DISCONNECTED]     = "disconnected",
        [RDMA_CM_EVENT_DEVICE_REMOVAL]   = "device removal",
        [RDMA_CM_EVENT_MULTICAST_JOIN]   = "multicast join",
        [RDMA_CM_EVENT_MULTICAST_ERROR]  = "multicast error",
        [RDMA_CM_EVENT_ADDR_CHANGE]      = "address change",
        [RDMA_CM_EVENT_TIMEWAIT_EXIT]    = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
        size_t index = event;

        return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
                        cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};
static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static int cma_pernet_id;

struct cma_pernet {
        struct idr tcp_ps;
        struct idr udp_ps;
        struct idr ipoib_ps;
        struct idr ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
        return net_generic(net, cma_pernet_id);
}
static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
{
        struct cma_pernet *pernet = cma_pernet(net);

        switch (ps) {
        case RDMA_PS_TCP:
                return &pernet->tcp_ps;
        case RDMA_PS_UDP:
                return &pernet->udp_ps;
        case RDMA_PS_IPOIB:
                return &pernet->ipoib_ps;
        case RDMA_PS_IB:
                return &pernet->ib_ps;
        default:
                return NULL;
        }
}
struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
        enum ib_gid_type        *default_gid_type;
};

struct rdma_bind_list {
        enum rdma_port_space    ps;
        struct hlist_head       owners;
        unsigned short          port;
};

struct class_port_info_context {
        struct ib_class_port_info       *class_port_info;
        struct ib_device                *device;
        struct completion               done;
        struct ib_sa_query              *sa_query;
        u8                              port_num;
};
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
                        struct rdma_bind_list *bind_list, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
                                          enum rdma_port_space ps, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
        struct idr *idr = cma_pernet_idr(net, ps);

        idr_remove(idr, snum);
}

enum {
        CMA_OPTION_AFONLY,
};
void cma_ref_dev(struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
                                             void *cookie)
{
        struct cma_device *cma_dev;
        struct cma_device *found_cma_dev = NULL;

        mutex_lock(&lock);

        list_for_each_entry(cma_dev, &dev_list, list)
                if (filter(cma_dev->device, cookie)) {
                        found_cma_dev = cma_dev;
                        break;
                }

        if (found_cma_dev)
                cma_ref_dev(found_cma_dev);
        mutex_unlock(&lock);
        return found_cma_dev;
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port)
{
        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
                             unsigned int port,
                             enum ib_gid_type default_gid_type)
{
        unsigned long supported_gids;

        if (port < rdma_start_port(cma_dev->device) ||
            port > rdma_end_port(cma_dev->device))
                return -EINVAL;

        supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

        if (!(supported_gids & 1 << default_gid_type))
                return -EINVAL;

        cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
                default_gid_type;

        return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
        return cma_dev->device;
}
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list; /* listen_any_list or cma_device.list */
        struct list_head        listen_list; /* per device listens */
        struct cma_device       *cma_dev;
        struct list_head        mc_list;

        int                     internal_id;
        enum rdma_cm_state      state;
        spinlock_t              lock;
        struct mutex            qp_mutex;

        struct completion       comp;
        atomic_t                refcount;
        struct mutex            handler_mutex;

        int                     backlog;
        int                     timeout_ms;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32                     seq_num;
        u32                     qkey;
        u32                     qp_num;
        pid_t                   owner;
        u32                     options;
        u8                      srq;
        u8                      tos;
        u8                      reuseaddr;
        u8                      afonly;
        enum ib_gid_type        gid_type;
};
struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head        list;
        void                    *context;
        struct sockaddr_storage addr;
        struct kref             mcref;
        bool                    igmp_joined;
        u8                      join_state;
};

struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum rdma_cm_state      old_state;
        enum rdma_cm_state      new_state;
        struct rdma_cm_event    event;
};

struct cma_ndev_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        struct rdma_cm_event    event;
};

struct iboe_mcast_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        struct cma_multicast    *mc;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};
#define CMA_VERSION 0x00

struct cma_req_info {
        struct ib_device *device;
        int port;
        union ib_gid local_gid;
        __be64 service_id;
        u16 pkey;
        bool has_gid;
};
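/*
 * The cma_comp*()/cma_exch() helpers below test and/or update
 * id_priv->state under id_priv->lock, so that concurrent event handlers
 * observe a consistent view of the RDMA CM state machine.
 */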
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
                                   enum rdma_cm_state exch)
{
        unsigned long flags;
        enum rdma_cm_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
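/*
 * For RoCE, multicast membership is driven via IGMP on the bound
 * net_device.  The IPv4 group address is recovered from the last four
 * bytes of the MGID; the RTNL lock is taken around the join/leave.
 */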
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
        struct in_device *in_dev = NULL;

        if (ndev) {
                rtnl_lock();
                in_dev = __in_dev_get_rtnl(ndev);
                if (in_dev) {
                        if (join)
                                ip_mc_inc_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                        else
                                ip_mc_dec_group(in_dev,
                                                *(__be32 *)(mgid->raw + 12));
                }
                rtnl_unlock();
        }
        return (in_dev) ? 0 : -ENODEV;
}
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
                               struct cma_device *cma_dev)
{
        cma_ref_dev(cma_dev);
        id_priv->cma_dev = cma_dev;
        id_priv->gid_type = 0;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =
                rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        _cma_attach_to_dev(id_priv, cma_dev);
        id_priv->gid_type =
                cma_dev->default_gid_type[id_priv->id.port_num -
                                          rdma_start_port(cma_dev->device)];
}
void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
        struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

        kfree(mc->multicast.ib);
        kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
        mutex_lock(&lock);
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
        mutex_unlock(&lock);
}
static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
        return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
        return id_priv->id.route.addr.src_addr.ss_family;
}
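/*
 * Derive the QKey used for unreliable datagram traffic.  An explicit,
 * non-zero qkey wins; otherwise the UDP and IB port spaces use the
 * well-known RDMA_UDP_QKEY, and IPoIB inherits the QKey of the
 * broadcast multicast group.
 */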
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        if (id_priv->qkey) {
                if (qkey && id_priv->qkey != qkey)
                        return -EINVAL;
                return 0;
        }

        if (qkey) {
                id_priv->qkey = qkey;
                return 0;
        }

        switch (id_priv->id.ps) {
        case RDMA_PS_UDP:
        case RDMA_PS_IB:
                id_priv->qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(id_priv->id.device,
                                             id_priv->id.port_num, &rec.mgid,
                                             &rec);
                if (!ret)
                        id_priv->qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
        dev_addr->dev_type = ARPHRD_INFINIBAND;
        rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
        ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
        int ret = 0;

        if (addr->sa_family != AF_IB) {
                ret = rdma_translate_ip(addr, dev_addr, NULL);
        } else {
                cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
        }

        return ret;
}
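/*
 * Check that a device/port pair can reach the given GID: the link layer
 * must match the address type (IB vs. Ethernet), and for RoCE the GID
 * must be present in the port's GID table for the bound net_device.
 */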
static inline int cma_validate_port(struct ib_device *device, u8 port,
                                    enum ib_gid_type gid_type,
                                    union ib_gid *gid, int dev_type,
                                    int bound_if_index)
{
        int ret = -ENODEV;
        struct net_device *ndev = NULL;

        if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
                return ret;

        if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
                return ret;

        if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
                ndev = dev_get_by_index(&init_net, bound_if_index);
                if (ndev && ndev->flags & IFF_LOOPBACK) {
                        pr_info("detected loopback device\n");
                        dev_put(ndev);

                        if (!device->get_netdev)
                                return -EOPNOTSUPP;

                        ndev = device->get_netdev(device, port);
                        if (!ndev)
                                return -ENODEV;
                }
        } else {
                gid_type = IB_GID_TYPE_IB;
        }

        ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                         ndev, NULL);

        if (ndev)
                dev_put(ndev);

        return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
                           struct rdma_id_private *listen_id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid, iboe_gid, *gidp;
        int ret = -ENODEV;
        u8 port;

        if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
            id_priv->id.ps == RDMA_PS_IPOIB)
                return -EINVAL;

        mutex_lock(&lock);
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                    &iboe_gid);

        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);

        if (listen_id_priv) {
                cma_dev = listen_id_priv->cma_dev;
                port = listen_id_priv->id.port_num;
                gidp = rdma_protocol_roce(cma_dev->device, port) ?
                       &iboe_gid : &gid;

                ret = cma_validate_port(cma_dev->device, port,
                                        rdma_protocol_ib(cma_dev->device, port) ?
                                        IB_GID_TYPE_IB :
                                        listen_id_priv->gid_type, gidp,
                                        dev_addr->dev_type,
                                        dev_addr->bound_dev_if);
                if (!ret) {
                        id_priv->id.port_num = port;
                        goto out;
                }
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (listen_id_priv &&
                            listen_id_priv->cma_dev == cma_dev &&
                            listen_id_priv->id.port_num == port)
                                continue;

                        gidp = rdma_protocol_roce(cma_dev->device, port) ?
                               &iboe_gid : &gid;

                        ret = cma_validate_port(cma_dev->device, port,
                                                rdma_protocol_ib(cma_dev->device, port) ?
                                                IB_GID_TYPE_IB :
                                                cma_dev->default_gid_type[port - 1],
                                                gidp, dev_addr->dev_type,
                                                dev_addr->bound_dev_if);
                        if (!ret) {
                                id_priv->id.port_num = port;
                                goto out;
                        }
                }
        }

out:
        if (!ret)
                cma_attach_to_dev(id_priv, cma_dev);

        mutex_unlock(&lock);
        return ret;
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev, *cur_dev;
        struct sockaddr_ib *addr;
        union ib_gid gid, sgid, *dgid;
        u16 pkey, index;
        u8 p;
        int i;

        cma_dev = NULL;
        addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);

        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
                                continue;

                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;

                        for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
                                                       &gid, NULL);
                             i++) {
                                if (!memcmp(&gid, dgid, sizeof(gid))) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                        goto found;
                                }

                                if (!cma_dev && (gid.global.subnet_prefix ==
                                                 dgid->global.subnet_prefix)) {
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
                                }
                        }
                }
        }

        if (!cma_dev)
                return -ENODEV;

found:
        cma_attach_to_dev(id_priv, cma_dev);

        addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
        memcpy(&addr->sib_addr, &sgid, sizeof sgid);
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}
struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
                                  enum ib_qp_type qp_type)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->owner = task_pid_nr(current);
        id_priv->state = RDMA_CM_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        id_priv->id.qp_type = qp_type;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
        id_priv->id.route.addr.dev_addr.net = get_net(net);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
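/*
 * A UD QP has no connection handshake, so it is driven straight through
 * INIT -> RTR -> RTS here; connected QPs only go to INIT, and reach
 * RTR/RTS later via cma_modify_qp_rtr()/cma_modify_qp_rts().
 */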
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp_init_attr->port_num = id->port_num;
        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        mutex_lock(&id_priv->qp_mutex);
        ib_destroy_qp(id_priv->id.qp);
        id_priv->id.qp = NULL;
        mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        union ib_gid sgid;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                goto out;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
                           qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
        if (ret)
                goto out;

        BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

        if (conn_param)
                qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
        struct ib_qp_attr qp_attr;
        int ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;
        u16 pkey;

        if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
                pkey = 0xffff;
        else
                pkey = ib_addr_get_pkey(dev_addr);

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (id_priv->id.qp_type == IB_QPT_UD) {
                ret = cma_set_qkey(id_priv, 0);
                if (ret)
                        return ret;

                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);

                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
        } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
        } else
                ret = -ENOSYS;

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        case AF_INET6:
                return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
        case AF_IB:
                return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
        default:
                return 0;
        }
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
        if (src->sa_family != dst->sa_family)
                return -1;

        switch (src->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
                       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
        case AF_INET6:
                return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
                                     &((struct sockaddr_in6 *) dst)->sin6_addr);
        case AF_IB:
                return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
                                   &((struct sockaddr_ib *) dst)->sib_addr);
        default:
                return -1;
        }
}
static __be16 cma_port(struct sockaddr *addr)
{
        struct sockaddr_ib *sib;

        switch (addr->sa_family) {
        case AF_INET:
                return ((struct sockaddr_in *) addr)->sin_port;
        case AF_INET6:
                return ((struct sockaddr_in6 *) addr)->sin6_port;
        case AF_IB:
                sib = (struct sockaddr_ib *) addr;
                return htons((u16) (be64_to_cpu(sib->sib_sid) &
                                    be64_to_cpu(sib->sib_sid_mask)));
        default:
                return 0;
        }
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}
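/*
 * The cma_save_*_info() helpers below reconstruct the source and
 * destination sockaddrs of an incoming connection request, either from
 * the IB path record (AF_IB) or from the cma_hdr carried in the
 * request's private data (IPv4/IPv6).
 */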
static void cma_save_ib_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_sa_path_rec *path)
{
        struct sockaddr_ib *listen_ib, *ib;

        listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
        if (src_addr) {
                ib = (struct sockaddr_ib *)src_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
                        ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
                        ib->sib_flowinfo = listen_ib->sib_flowinfo;
                        ib->sib_addr = listen_ib->sib_addr;
                        ib->sib_sid = listen_ib->sib_sid;
                        ib->sib_scope_id = listen_ib->sib_scope_id;
                }
                ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
        }
        if (dst_addr) {
                ib = (struct sockaddr_ib *)dst_addr;
                ib->sib_family = AF_IB;
                if (path) {
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->dgid, 16);
                }
        }
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
                              struct sockaddr_in *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        if (src_addr) {
                *src_addr = (struct sockaddr_in) {
                        .sin_family = AF_INET,
                        .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
                        .sin_port = local_port,
                };
        }

        if (dst_addr) {
                *dst_addr = (struct sockaddr_in) {
                        .sin_family = AF_INET,
                        .sin_addr.s_addr = hdr->src_addr.ip4.addr,
                        .sin_port = hdr->port,
                };
        }
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
                              struct sockaddr_in6 *dst_addr,
                              struct cma_hdr *hdr,
                              __be16 local_port)
{
        if (src_addr) {
                *src_addr = (struct sockaddr_in6) {
                        .sin6_family = AF_INET6,
                        .sin6_addr = hdr->dst_addr.ip6,
                        .sin6_port = local_port,
                };
        }

        if (dst_addr) {
                *dst_addr = (struct sockaddr_in6) {
                        .sin6_family = AF_INET6,
                        .sin6_addr = hdr->src_addr.ip6,
                        .sin6_port = hdr->port,
                };
        }
}

static u16 cma_port_from_service_id(__be64 service_id)
{
        return (u16)be64_to_cpu(service_id);
}
static int cma_save_ip_info(struct sockaddr *src_addr,
                            struct sockaddr *dst_addr,
                            struct ib_cm_event *ib_event,
                            __be64 service_id)
{
        struct cma_hdr *hdr;
        __be16 port;

        hdr = ib_event->private_data;
        if (hdr->cma_version != CMA_VERSION)
                return -EINVAL;

        port = htons(cma_port_from_service_id(service_id));

        switch (cma_get_ip_ver(hdr)) {
        case 4:
                cma_save_ip4_info((struct sockaddr_in *)src_addr,
                                  (struct sockaddr_in *)dst_addr, hdr, port);
                break;
        case 6:
                cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
                                  (struct sockaddr_in6 *)dst_addr, hdr, port);
                break;
        default:
                return -EAFNOSUPPORT;
        }

        return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
                             struct sockaddr *dst_addr,
                             struct rdma_cm_id *listen_id,
                             struct ib_cm_event *ib_event,
                             sa_family_t sa_family, __be64 service_id)
{
        if (sa_family == AF_IB) {
                if (ib_event->event == IB_CM_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id,
                                         ib_event->param.req_rcvd.primary_path);
                else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
                        cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
                return 0;
        }

        return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
static int cma_save_req_info(const struct ib_cm_event *ib_event,
                             struct cma_req_info *req)
{
        const struct ib_cm_req_event_param *req_param =
                &ib_event->param.req_rcvd;
        const struct ib_cm_sidr_req_event_param *sidr_param =
                &ib_event->param.sidr_req_rcvd;

        switch (ib_event->event) {
        case IB_CM_REQ_RECEIVED:
                req->device     = req_param->listen_id->device;
                req->port       = req_param->port;
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid    = true;
                req->service_id = req_param->primary_path->service_id;
                req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            req_param->bth_pkey, req->pkey);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                req->device     = sidr_param->listen_id->device;
                req->port       = sidr_param->port;
                req->has_gid    = false;
                req->service_id = sidr_param->service_id;
                req->pkey       = sidr_param->pkey;
                if (req->pkey != sidr_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
                                            "RDMA CMA: in the future this may cause the request to be dropped\n",
                                            sidr_param->bth_pkey, req->pkey);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
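/*
 * Reverse-path check: the request's source and destination addresses are
 * only accepted if the kernel routing tables would route them over the
 * net_device the request actually arrived on.
 */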
static bool validate_ipv4_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in *dst_addr,
                                  const struct sockaddr_in *src_addr)
{
        __be32 daddr = dst_addr->sin_addr.s_addr,
               saddr = src_addr->sin_addr.s_addr;
        struct fib_result res;
        struct flowi4 fl4;
        int err;
        bool ret;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
            ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
            ipv4_is_loopback(saddr))
                return false;

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_iif = net_dev->ifindex;
        fl4.daddr = daddr;
        fl4.saddr = saddr;

        rcu_read_lock();
        err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
        ret = err == 0 && FIB_RES_DEV(res) == net_dev;
        rcu_read_unlock();

        return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
                                  const struct sockaddr_in6 *dst_addr,
                                  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
        const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
                           IPV6_ADDR_LINKLOCAL;
        struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
                                         &src_addr->sin6_addr, net_dev->ifindex,
                                         strict);
        bool ret;

        if (!rt)
                return false;

        ret = rt->rt6i_idev->dev == net_dev;
        ip6_rt_put(rt);

        return ret;
#else
        return false;
#endif
}

static bool validate_net_dev(struct net_device *net_dev,
                             const struct sockaddr *daddr,
                             const struct sockaddr *saddr)
{
        const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
        const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
        const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
        const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

        switch (daddr->sa_family) {
        case AF_INET:
                return saddr->sa_family == AF_INET &&
                       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

        case AF_INET6:
                return saddr->sa_family == AF_INET6 &&
                       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

        default:
                return false;
        }
}
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
                                          const struct cma_req_info *req)
{
        struct sockaddr_storage listen_addr_storage, src_addr_storage;
        struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
                        *src_addr = (struct sockaddr *)&src_addr_storage;
        struct net_device *net_dev;
        const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
        int err;

        err = cma_save_ip_info(listen_addr, src_addr, ib_event,
                               req->service_id);
        if (err)
                return ERR_PTR(err);

        net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
                                           gid, listen_addr);
        if (!net_dev)
                return ERR_PTR(-ENODEV);

        if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
                dev_put(net_dev);
                return ERR_PTR(-EHOSTUNREACH);
        }

        return net_dev;
}

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
        return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
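/*
 * Listener matching: an incoming request is compared against each
 * candidate listener's address family, destination IP (taken from the
 * cma_hdr carried in the private data) and bound device before the
 * request is delivered to it.
 */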
static bool cma_match_private_data(struct rdma_id_private *id_priv,
                                   const struct cma_hdr *hdr)
{
        struct sockaddr *addr = cma_src_addr(id_priv);
        __be32 ip4_addr;
        struct in6_addr ip6_addr;

        if (cma_any_addr(addr) && !id_priv->afonly)
                return true;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
                if (cma_get_ip_ver(hdr) != 4)
                        return false;
                if (!cma_any_addr(addr) &&
                    hdr->dst_addr.ip4.addr != ip4_addr)
                        return false;
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
                if (cma_get_ip_ver(hdr) != 6)
                        return false;
                if (!cma_any_addr(addr) &&
                    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
                        return false;
                break;
        default:
                return false;
        }

        return true;
}

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
        enum rdma_transport_type transport =
                rdma_node_get_transport(device->node_type);

        return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
        struct ib_device *device = id->device;
        const int port_num = id->port_num ?: rdma_start_port(device);

        return cma_protocol_roce_dev_port(device, port_num);
}
static bool cma_match_net_dev(const struct rdma_cm_id *id,
                              const struct net_device *net_dev,
                              u8 port_num)
{
        const struct rdma_addr *addr = &id->route.addr;

        if (!net_dev)
                /* This request is an AF_IB request or a RoCE request */
                return (!id->port_num || id->port_num == port_num) &&
                       (addr->src_addr.ss_family == AF_IB ||
                        cma_protocol_roce_dev_port(id->device, port_num));

        return !addr->dev_addr.bound_dev_if ||
               (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
                addr->dev_addr.bound_dev_if == net_dev->ifindex);
}

static struct rdma_id_private *cma_find_listener(
                const struct rdma_bind_list *bind_list,
                const struct ib_cm_id     *cm_id,
                const struct ib_cm_event  *ib_event,
                const struct cma_req_info *req,
                const struct net_device   *net_dev)
{
        struct rdma_id_private *id_priv, *id_priv_dev;

        if (!bind_list)
                return ERR_PTR(-EINVAL);

        hlist_for_each_entry(id_priv, &bind_list->owners, node) {
                if (cma_match_private_data(id_priv, ib_event->private_data)) {
                        if (id_priv->id.device == cm_id->device &&
                            cma_match_net_dev(&id_priv->id, net_dev, req->port))
                                return id_priv;
                        list_for_each_entry(id_priv_dev,
                                            &id_priv->listen_list,
                                            listen_list) {
                                if (id_priv_dev->id.device == cm_id->device &&
                                    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
                                        return id_priv_dev;
                        }
                }
        }

        return ERR_PTR(-EINVAL);
}
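/*
 * Map an incoming IB CM event to the rdma_cm listener it belongs to:
 * recover the request info, find the ingress net_device, then look up
 * the bind list for the request's port space and port.
 */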
static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                                                 struct ib_cm_event *ib_event,
                                                 struct net_device **net_dev)
{
        struct cma_req_info req;
        struct rdma_bind_list *bind_list;
        struct rdma_id_private *id_priv;
        int err;

        err = cma_save_req_info(ib_event, &req);
        if (err)
                return ERR_PTR(err);

        *net_dev = cma_get_net_dev(ib_event, &req);
        if (IS_ERR(*net_dev)) {
                if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
                        /* Assuming the protocol is AF_IB */
                        *net_dev = NULL;
                } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
                        /* TODO find the net dev matching the request parameters
                         * through the RoCE GID table */
                        *net_dev = NULL;
                } else {
                        return ERR_CAST(*net_dev);
                }
        }

        bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
                                rdma_ps_from_service_id(req.service_id),
                                cma_port_from_service_id(req.service_id));
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
        }

        return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
        return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
        }
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                /* sync with device removal to avoid duplicate destruction */
                list_del_init(&dev_id_priv->list);
                list_del(&dev_id_priv->listen_list);
                mutex_unlock(&lock);

                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum rdma_cm_state state)
{
        switch (state) {
        case RDMA_CM_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case RDMA_CM_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case RDMA_CM_LISTEN:
                if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;
        struct net *net = id_priv->id.route.addr.dev_addr.net;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                cma_ps_remove(net, bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
                                      id_priv->id.port_num)) {
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
                } else {
                        if (mc->igmp_joined) {
                                struct rdma_dev_addr *dev_addr =
                                        &id_priv->id.route.addr.dev_addr;
                                struct net_device *ndev = NULL;

                                if (dev_addr->bound_dev_if)
                                        ndev = dev_get_by_index(&init_net,
                                                                dev_addr->bound_dev_if);
                                if (ndev) {
                                        cma_igmp_send(ndev,
                                                      &mc->multicast.ib->rec.mgid,
                                                      false);
                                        dev_put(ndev);
                                }
                        }
                        kref_put(&mc->mcref, release_mc);
                }
        }
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum rdma_cm_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, RDMA_CM_DESTROYING);
        cma_cancel_operation(id_priv, state);

        /*
         * Wait for any active callback to finish.  New callbacks will find
         * the id_priv state set to destroying and abort.
         */
        mutex_lock(&id_priv->handler_mutex);
        mutex_unlock(&id_priv->handler_mutex);

        if (id_priv->cma_dev) {
                if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.iw)
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                }
                cma_leave_mc_groups(id_priv);
                cma_release_dev(id_priv);
        }

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        if (id_priv->internal_id)
                cma_deref_id(id_priv->id.context);

        kfree(id_priv->id.route.path_rec);
        put_net(id_priv->id.route.addr.dev_addr.net);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
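/*
 * On receiving a connection reply, bring the QP through RTR to RTS and
 * send an RTU.  Any failure rejects the connection with
 * IB_CM_REJ_CONSUMER_DEFINED and moves the QP to the error state.
 */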
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(id_priv, NULL);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(id_priv, NULL);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
             id_priv->state != RDMA_CM_DISCONNECT))
                goto out;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                if (id_priv->id.qp) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else {
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                }
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
                                   RDMA_CM_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
                event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
                break;
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                pr_err("RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event,
                                               struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        const __be64 service_id =
                      ib_event->param.req_rcvd.primary_path->service_id;
        int ret;

        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
                            listen_id->event_handler, listen_id->context,
                            listen_id->ps, ib_event->param.req_rcvd.qp_type);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family, service_id))
                goto err;

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto err;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        if (net_dev) {
                ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_protocol_roce(listen_id) &&
                    cma_any_addr(cma_src_addr(id_priv))) {
                        rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
                        rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
                        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
                } else if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;

err:
        rdma_destroy_id(id);
        return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event,
                                              struct net_device *net_dev)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct net *net = listen_id->route.addr.dev_addr.net;
        int ret;

        id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
                            listen_id->ps, IB_QPT_UD);
        if (IS_ERR(id))
                return NULL;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
                              (struct sockaddr *)&id->route.addr.dst_addr,
                              listen_id, ib_event, ss_family,
                              ib_event->param.sidr_req_rcvd.service_id))
                goto err;

        if (net_dev) {
                ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
                if (ret)
                        goto err;
        } else {
                if (!cma_any_addr(cma_src_addr(id_priv))) {
                        ret = cma_translate_addr(cma_src_addr(id_priv),
                                                 &id->route.addr.dev_addr);
                        if (ret)
                                goto err;
                }
        }

        id_priv->state = RDMA_CM_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
        return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
                 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
                ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
                 (id->qp_type == IB_QPT_UD)) ||
                (!id->qp_type));
}
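/*
 * Top-level IB CM callback for incoming connection requests: locate the
 * listener, spawn a child rdma_cm_id in the RDMA_CM_CONNECT state, bind
 * it to a device, and hand RDMA_CM_EVENT_CONNECT_REQUEST to the user.
 */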
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;

        listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
        if (IS_ERR(listen_id))
                return PTR_ERR(listen_id);

        if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
                ret = -EINVAL;
                goto net_dev_put;
        }

        mutex_lock(&listen_id->handler_mutex);
        if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
                goto err1;
        }

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto err1;
        }

        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        ret = cma_acquire_dev(conn_id, listen_id);
        if (ret)
                goto err2;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret)
                goto err3;
        /*
         * Acquire mutex to prevent user executing rdma_destroy_id()
         * while we're accessing the cm_id.
         */
        mutex_lock(&lock);
        if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
            (conn_id->id.qp_type != IB_QPT_UD))
                ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
        mutex_unlock(&lock);
        mutex_unlock(&conn_id->handler_mutex);
        mutex_unlock(&listen_id->handler_mutex);
        cma_deref_id(conn_id);
        if (net_dev)
                dev_put(net_dev);
        return 0;

err3:
        cma_deref_id(conn_id);
        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;
err2:
        cma_exch(conn_id, RDMA_CM_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
err1:
        mutex_unlock(&listen_id->handler_mutex);
        if (conn_id)
                rdma_destroy_id(&conn_id->id);

net_dev_put:
        if (net_dev)
                dev_put(net_dev);

        return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
        if (addr->sa_family == AF_IB)
                return ((struct sockaddr_ib *) addr)->sib_sid;

        return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
        struct rdma_id_private *id_priv = iw_id->context;
        struct rdma_cm_event event;
        int ret = 0;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != RDMA_CM_CONNECT)
                goto out;

        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                memcpy(cma_src_addr(id_priv), laddr,
                       rdma_addr_size(laddr));
                memcpy(cma_dst_addr(id_priv), raddr,
                       rdma_addr_size(raddr));
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                        event.param.conn.initiator_depth = iw_event->ird;
                        event.param.conn.responder_resources = iw_event->ord;
                        break;
                case -ECONNRESET:
                case -ECONNREFUSED:
                        event.event = RDMA_CM_EVENT_REJECTED;
                        break;
                case -ETIMEDOUT:
                        event.event = RDMA_CM_EVENT_UNREACHABLE;
                        break;
                default:
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                        break;
                }
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                event.param.conn.initiator_depth = iw_event->ird;
                event.param.conn.responder_resources = iw_event->ord;
                break;
        default:
                BUG_ON(1);
        }

        event.status = iw_event->status;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }

out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
{
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
        int ret = -ECONNABORTED;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

        listen_id = cm_id->context;

        mutex_lock(&listen_id->handler_mutex);
        if (listen_id->state != RDMA_CM_LISTEN)
                goto out;

        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
                                   listen_id->id.event_handler,
                                   listen_id->id.context,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(new_cm_id)) {
                ret = -ENOMEM;
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = RDMA_CM_CONNECT;

        ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        ret = cma_acquire_dev(conn_id, listen_id);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        conn_id->cm_id.iw = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;

        memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
        memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        event.param.conn.initiator_depth = iw_event->ird;
        event.param.conn.responder_resources = iw_event->ord;

        /*
         * Protect against the user destroying conn_id from another thread
         * until we're done accessing it.
         */
        atomic_inc(&conn_id->refcount);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, RDMA_CM_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
                cma_deref_id(conn_id);
                rdma_destroy_id(&conn_id->id);
                goto out;
        }

        mutex_unlock(&conn_id->handler_mutex);
        cma_deref_id(conn_id);

out:
        mutex_unlock(&listen_id->handler_mutex);
        return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct sockaddr *addr;
        struct ib_cm_id *id;
        __be64 svc_id;

        addr = cma_src_addr(id_priv);
        svc_id = rdma_get_service_id(&id_priv->id, addr);
        id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
        if (IS_ERR(id))
                return PTR_ERR(id);
        id_priv->cm_id.ib = id;

        return 0;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
        int ret;
        struct iw_cm_id *id;

        id = iw_create_cm_id(id_priv->id.device,
                             iw_conn_req_handler,
                             id_priv);
        if (IS_ERR(id))
                return PTR_ERR(id);

        id->tos = id_priv->tos;
        id_priv->cm_id.iw = id;

        memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
               rdma_addr_size(cma_src_addr(id_priv)));

        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

        if (ret) {
                iw_destroy_cm_id(id_priv->cm_id.iw);
                id_priv->cm_id.iw = NULL;
        }

        return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        struct net *net = id_priv->id.route.addr.dev_addr.net;
        int ret;

        if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;

        id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
                            id_priv->id.qp_type);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = RDMA_CM_ADDR_BOUND;
        memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
               rdma_addr_size(cma_src_addr(id_priv)));

        _cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
        atomic_inc(&id_priv->refcount);
        dev_id_priv->internal_id = 1;
        dev_id_priv->afonly = id_priv->afonly;

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
                        ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
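/*
 * Completion callback for the SA path record query issued by
 * cma_query_ib_route().  On failure the queued work is retargeted to
 * report RDMA_CM_EVENT_ROUTE_ERROR and fall back to the
 * RDMA_CM_ADDR_RESOLVED state.
 */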
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
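/*
 * Deferred events follow a common pattern in this file: a cma_work entry
 * records the expected old/new id states plus the event to deliver, and
 * cma_wq invokes the handler below outside the caller's context.
 */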
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
		vlan_dev_real_dev(ndev) : ndev;

	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}

static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}
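/*
 * For RoCE (IBoE) there is no subnet administrator to query: the path
 * record below is synthesized locally from the bound net_device, the
 * resolved L2 addresses, and the GID type selected above.
 */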
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if) {
		unsigned long supported_gids;

		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
		if (!ndev) {
			ret = -ENODEV;
			goto err2;
		}

		if (ndev->flags & IFF_LOOPBACK) {
			dev_put(ndev);
			if (!id_priv->id.device->get_netdev) {
				ret = -EOPNOTSUPP;
				goto err2;
			}

			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
							      id_priv->id.port_num);
			if (!ndev) {
				ret = -ENODEV;
				goto err2;
			}
		}

		route->path_rec->net = &init_net;
		route->path_rec->ifindex = ndev->ifindex;
		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
							    id_priv->id.port_num);
		route->path_rec->gid_type =
			cma_route_gid_type(addr->dev_addr.network,
					   supported_gids,
					   id_priv->gid_type);
	}
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	/* Use the hint from IP Stack to select GID Type */
	if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
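/*
 * A minimal sketch of the active-side call sequence (illustrative only,
 * not part of this file; error handling omitted, cb is a caller-supplied
 * rdma_cm event handler):
 *
 *	id = rdma_create_id(net, cb, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	// wait for RDMA_CM_EVENT_ADDR_RESOLVED, then:
 *	rdma_resolve_route(id, 2000);
 *	// wait for RDMA_CM_EVENT_ROUTE_RESOLVED, then:
 *	rdma_connect(id, &conn_param);
 */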
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !cma_ps_find(net, ps, (unsigned short)rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}

static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}

static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
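/*
 * For AF_IB the port number is not carried in a sockaddr_in; it is encoded
 * in the low 16 bits of the 64-bit IB service ID, with the port space
 * occupying the RDMA_IB_IP_PS_MASK bits checked above.
 */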
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
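/*
 * A minimal sketch of the passive-side call sequence (illustrative only,
 * not part of this file; error handling omitted, cb is a caller-supplied
 * rdma_cm event handler):
 *
 *	id = rdma_create_id(net, cb, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_bind_addr(id, (struct sockaddr *)&src_addr);
 *	rdma_listen(id, 10);
 *	// cb receives RDMA_CM_EVENT_CONNECT_REQUEST with a new child id,
 *	// which is accepted with rdma_accept(child_id, &conn_param).
 */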
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
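/*
 * Multicast joins take two shapes: on IB the SA performs the join and
 * reports back through the handler below, while on RoCE the join is
 * emulated locally (see cma_iboe_join_multicast) and the same handler is
 * invoked from a work item with status 0.
 */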
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret = 0;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 ndev, gid_type,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static void cma_query_sa_classport_info_cb(int status,
					   struct ib_class_port_info *rec,
					   void *context)
{
	struct class_port_info_context *cb_ctx = context;

	if (status || !rec) {
		pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
			 cb_ctx->device->name, cb_ctx->port_num, status);
		goto out;
	}

	memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));

out:
	complete(&cb_ctx->done);
}

static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
				       struct ib_class_port_info *class_port_info)
{
	struct class_port_info_context *cb_ctx;
	int ret;

	cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
	if (!cb_ctx)
		return -ENOMEM;

	cb_ctx->device = device;
	cb_ctx->class_port_info = class_port_info;
	cb_ctx->port_num = port_num;
	init_completion(&cb_ctx->done);

	ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
					     CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
					     GFP_KERNEL, cma_query_sa_classport_info_cb,
					     cb_ctx, &cb_ctx->sa_query);
	if (ret < 0) {
		pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
		       device->name, port_num, ret);
		goto out;
	}

	wait_for_completion(&cb_ctx->done);

out:
	kfree(cb_ctx);
	return ret;
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct ib_class_port_info class_port_info;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
		ret = cma_query_sa_classport_info(id_priv->id.device,
						  id_priv->id.port_num,
						  &class_port_info);
		if (ret)
			return ret;

		if (!(ib_get_cpi_capmask2(&class_port_info) &
		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
			pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
				"RDMA CM: SM doesn't support Send Only Full Member option\n",
				id_priv->id.device->name, id_priv->id.port_num);
			return -EOPNOTSUPP;
		}
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
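/*
 * Bonding failover can silently change the source MAC under an id. The
 * netdev notifier below watches for that and queues an
 * RDMA_CM_EVENT_ADDR_CHANGE so consumers can rebind.
 */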
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		kfree(cma_dev);
		return;
	}
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		cma_dev->default_gid_type[i - rdma_start_port(device)] =
			find_first_bit(&supported_gids, BITS_PER_LONG);
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
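/*
 * Module init ordering matters: the workqueue and per-net port spaces must
 * exist before the IB client is registered, since device additions can
 * trigger wildcard listens immediately.
 */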
static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
			    cma_cb_table))
		pr_warn("RDMA CMA: failed to add netlink callback\n");
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);