/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/tcp.h>
#include <net/ipv6.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static DEFINE_IDR(ib_ps);

enum {
	CMA_OPTION_AFONLY,
};
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u32			options;
	u8			srq;
	u8			tos;
	u8			reuseaddr;
	u8			afonly;
};
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	 work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};
union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__be16 port;
	__be16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
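
/*
 * Worked example (added note, not in the original source): the version
 * fields above pack a value into bits 7:4 while preserving bits 3:0.
 * With hdr->ip_version == 0x00, cma_set_ip_ver(hdr, 4) stores
 * (4 << 4) | 0x0 == 0x40, and cma_get_ip_ver(hdr) then returns
 * 0x40 >> 4 == 4.
 */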
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}
static int cma_set_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey)
		return 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
{
	int i;
	int err;
	struct ib_port_attr props;
	union ib_gid tmp;

	err = ib_query_port(device, port_num, &props);
	if (err)
		return err;

	for (i = 0; i < props.gid_tbl_len; ++i) {
		err = ib_query_gid(device, port_num, i, &tmp);
		if (err)
			return err;
		if (!memcmp(&tmp, gid, sizeof tmp))
			return 0;
	}

	return -EADDRNOTAVAIL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	int ret = -ENODEV;
	u8 port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;

	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	iboe_addr_get_sgid(dev_addr, &iboe_gid);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
				else
					ret = find_gid_port(cma_dev->device, &gid, port);

				if (!ret) {
					id_priv->id.port_num = port;
					goto out;
				}
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ipv4_is_zeronet(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	default:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	}
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __be16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __be16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
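
/*
 * Worked example (added note, not in the original source): for the
 * non-SDP port spaces the cma_hdr defined above is prepended to the IB
 * CM private data, so user data starts sizeof(struct cma_hdr) bytes in:
 * 1 (cma_version) + 1 (ip_version) + 2 (port) + 16 + 16 (the two
 * cma_ip_addr unions) = 36 bytes.  SDP carries its own header, so its
 * offset is 0.
 */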
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
		    && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}
static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		return NULL;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
					&rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__be16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
					&id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;

	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
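
/*
 * Worked example (added note, not in the original source): with
 * RDMA_PS_TCP (0x0106) and a bound port of 5001 (0x1389), the service
 * ID above is cpu_to_be64((0x0106ULL << 16) + 0x1389) ==
 * cpu_to_be64(0x01061389).
 */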
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = htonl(~0);
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			if (!cma_any_addr(addr)) {
				cma_data->dst_addr.ip4.addr = ip4_addr;
				cma_mask->dst_addr.ip4.addr = htonl(~0);
			}
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			if (!cma_any_addr(addr)) {
				cma_data->dst_addr.ip6 = ip6_addr;
				memset(&cma_mask->dst_addr.ip6, 0xFF,
				       sizeof cma_mask->dst_addr.ip6);
			}
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	if (dev)
		dev_put(dev);
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;
	int ret;

	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.ib = id;

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.iw = id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s\n", ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
						 (struct sockaddr *) &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.ss_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
	struct net_device *ndev = NULL;
	u16 vid;

	if (src_addr->sin_family != dst_addr->sin_family)
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	vid = rdma_vlan_dev_vlan_id(ndev);

	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = netdev_get_prio_tc_map(
			ndev->priv_flags & IFF_802_1Q_VLAN ?
				vlan_dev_real_dev(ndev) : ndev,
			rt_tos2priority(id_priv->tos));

	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr *src, *dst;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
		if ((src->sa_family = dst->sa_family) == AF_INET) {
			((struct sockaddr_in *)src)->sin_addr =
				((struct sockaddr_in *)dst)->sin_addr;
		} else {
			((struct sockaddr_in6 *)src)->sin6_addr =
				((struct sockaddr_in6 *)dst)->sin6_addr;
		}
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
			((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
				((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;

	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	rover = net_random() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !idr_find(ps, (unsigned short) rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;
	struct hlist_node *node;

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
*ps
, struct rdma_id_private
*id_priv
)
2244 struct rdma_bind_list
*bind_list
;
2245 unsigned short snum
;
2248 snum
= ntohs(cma_port((struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
));
2249 if (snum
< PROT_SOCK
&& !capable(CAP_NET_BIND_SERVICE
))
2252 bind_list
= idr_find(ps
, snum
);
2254 ret
= cma_alloc_port(ps
, id_priv
, snum
);
2256 ret
= cma_check_port(bind_list
, id_priv
, id_priv
->reuseaddr
);
2258 cma_bind_port(bind_list
, id_priv
);
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	case RDMA_PS_IB:
		ps = &ib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;
	if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
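/*
 * Usage sketch (passive side, not part of this file): a minimal listener
 * built on rdma_bind_addr()/rdma_listen(), assuming a caller-supplied
 * handler my_cm_handler() that processes RDMA_CM_EVENT_CONNECT_REQUEST.
 *
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port = htons(7174) };
 *	struct rdma_cm_id *listen_id;
 *
 *	listen_id = rdma_create_id(my_cm_handler, NULL, RDMA_PS_TCP,
 *				   IB_QPT_RC);
 *	if (IS_ERR(listen_id))
 *		return PTR_ERR(listen_id);
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *) &sin);
 *	if (!ret)
 *		ret = rdma_listen(listen_id, 10);
 *	if (ret)
 *		rdma_destroy_id(listen_id);
 */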
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6)
			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
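/*
 * The afonly logic above mirrors socket behaviour: an AF_INET bind is
 * always single-family, while an AF_INET6 wildcard bind follows the
 * net.ipv6.bindv6only sysctl unless the user already forced a choice,
 * recorded as the CMA_OPTION_AFONLY bit in id_priv->options.
 */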
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	if (route->addr.src_addr.ss_family == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) &route->addr.src_addr;
		dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 4);
			sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			sdp_hdr->port = src4->sin_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 4);
			cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
			cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
			cma_hdr->port = src4->sin_port;
			break;
		}
	} else {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
		dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;

		switch (ps) {
		case RDMA_PS_SDP:
			sdp_hdr = hdr;
			if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
				return -EINVAL;
			sdp_set_ip_ver(sdp_hdr, 6);
			sdp_hdr->src_addr.ip6 = src6->sin6_addr;
			sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
			sdp_hdr->port = src6->sin6_port;
			break;
		default:
			cma_hdr = hdr;
			cma_hdr->cma_version = CMA_VERSION;
			cma_set_ip_ver(cma_hdr, 6);
			cma_hdr->src_addr.ip6 = src6->sin6_addr;
			cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
			cma_hdr->port = src6->sin6_port;
			break;
		}
	}
	return 0;
}
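/*
 * Private-data layout produced above for the non-SDP port spaces, sketched
 * for reference; field names follow struct cma_hdr defined earlier in this
 * file:
 *
 *	byte 0		: cma_version (CMA_VERSION)
 *	byte 1, bits 7:4: IP version (4 or 6)
 *	bytes 2-3	: source port, network byte order
 *	then src_addr and dst_addr as the ip4/ip6 address union
 *
 * The header always travels at the front of the CM private data; the
 * caller's own private data is copied in after it by the connect paths
 * below.
 */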
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = -EINVAL;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	struct ib_cm_id	*id;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    (struct sockaddr *) &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
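/*
 * Usage sketch (active side, not part of this file): once address and
 * route resolution have completed, a caller typically does something
 * like the following from its event handler or a worker thread:
 *
 *	struct rdma_conn_param param = {
 *		.responder_resources = 1,
 *		.initiator_depth = 1,
 *		.retry_count = 7,
 *	};
 *
 *	ret = rdma_connect(id, &param);
 *
 * RDMA_CM_EVENT_ESTABLISHED (or a reject/unreachable event) is then
 * delivered through the handler registered at rdma_create_id() time.
 */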
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
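/*
 * rdma_accept() is normally called from the RDMA_CM_EVENT_CONNECT_REQUEST
 * callback on the child id carried in that event.  Note the UD branch
 * above: for datagram port spaces "accept" degenerates into a single SIDR
 * reply carrying the QPN and Q_Key instead of a full REQ/REP/RTU
 * handshake.
 */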
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
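/*
 * For IB the disconnect above is the DREQ/DREP exchange: if sending a
 * DREQ fails because the peer already initiated the disconnect, we answer
 * with a DREP instead.  The QP is moved to the error state first so that
 * outstanding work requests complete with a flush status.
 */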
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
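/*
 * Example of the IPv4 branch above: joining 224.0.0.251 in the UDP port
 * space maps through ip_ib_mc_map() to an MGID derived from the
 * partition's broadcast GID, after which byte 7 is overwritten with 0x01
 * so that RDMA CM joiners only match groups created with the RDMA CM
 * signature.  (Illustrative; the exact MGID depends on the broadcast GID
 * of the IPoIB partition in use.)
 */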
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_RET(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			kref_init(&mc->mcref);
			ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
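/*
 * Usage sketch (not part of this file), assuming an id that is bound or
 * address-resolved and backed by a UD QP:
 *
 *	struct sockaddr_in maddr = { .sin_family = AF_INET };
 *
 *	maddr.sin_addr.s_addr = htonl(0xE00000FBu);	/@ 224.0.0.251 @/
 *	ret = rdma_join_multicast(id, (struct sockaddr *) &maddr, my_ctx);
 *	...
 *	rdma_leave_multicast(id, (struct sockaddr *) &maddr);
 *
 * (The inner comment delimiters are written with '@' here only to keep
 * this block a single comment.)  The join result arrives asynchronously
 * as RDMA_CM_EVENT_MULTICAST_JOIN (or _ERROR) with my_ctx echoed back in
 * event.param.ud.private_data.
 */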
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:
					ib_sa_free_multicast(mc->multicast.ib);
					kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
					kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ctx)
{
	struct net_device *ndev = (struct net_device *)ctx;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (id->route.addr.src_addr.ss_family == AF_INET) {
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in),
						  &id->route.addr.src_addr,
						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
					goto out;
				}
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in),
						  &id->route.addr.dst_addr,
						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
					goto out;
				}
			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in6),
						  &id->route.addr.src_addr,
						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
					goto out;
				}
				if (ibnl_put_attr(skb, nlh,
						  sizeof(struct sockaddr_in6),
						  &id->route.addr.dst_addr,
						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
					goto out;
				}
			}

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}
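/*
 * The (i_dev, i_id) pair saved in cb->args[] above is the resume cursor
 * for the netlink dump: when the skb fills up, the netlink core calls us
 * again and the loops skip everything below the saved indices.  This is
 * the usual pattern for paginated netlink dump callbacks.
 */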
static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);