/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
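/*
 * IB CM timeouts use a 4.096us * 2^n encoding, so a response timeout of
 * 20 is roughly 4.3 seconds per retry.  IB_CM_MRA_FLAG_DELAY asks the
 * ib_cm layer to send the MRA only if a duplicate REQ is received.
 */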
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};
struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum cma_state		state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	u8			srq;
	u8			tos;
};
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
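/*
 * The helpers below implement the id's state machine under id_priv->lock:
 * cma_comp() tests the current state, cma_comp_exch() atomically claims a
 * state transition (compare-and-exchange), and cma_exch() forces one.
 */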
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
			enum rdma_port_space ps,
			struct rdma_dev_addr *dev_addr, u32 *qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (ps) {
	case RDMA_PS_UDP:
		*qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
		*qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_addr->dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			ret = cma_set_qkey(cma_dev->device,
					   id_priv->id.port_num,
					   id_priv->id.ps, dev_addr,
					   &id_priv->qkey);
			if (!ret)
				cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
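/*
 * dev_remove counts callbacks currently executing on this id; device
 * removal waits on wait_remove until the count drops to zero (see the
 * comment above struct rdma_id_private).
 */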
static int cma_disable_remove(struct rdma_id_private *id_priv,
			      enum cma_state state)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == state) {
		atomic_inc(&id_priv->dev_remove);
		ret = 0;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static void cma_enable_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
	return (id_priv->id.device && id_priv->cm_id.ib);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
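/*
 * UD QPs have no connection handshake to drive their state, so they are
 * moved straight through INIT -> RTR -> RTS here at creation time.
 */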
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (cma_is_ud_ps(id_priv->id.ps))
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (cma_is_ud_ps(id_priv->id.ps)) {
		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
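/*
 * Connection requests carry a small CMA or SDP header at the head of the
 * CM private data; the helpers below extract and store the IP addressing
 * information it encodes.
 */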
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
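/*
 * SDP consumers exchange the sdp_hh header itself, so their private data
 * starts at offset 0; all other port spaces prepend a struct cma_hdr.
 */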
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		ib_sa_free_multicast(mc->multicast.ib);
		kfree(mc);
	}
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_enable_remove(id_priv);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (cma_is_ud_ps(listen_id->id.ps)) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret) {
		cma_enable_remove(conn_id);
		goto out;
	}

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_enable_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_enable_remove(listen_id);
	return ret;
}
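/*
 * RDMA IP CM service IDs place the port space in the upper bits and the
 * 16-bit IP port number in the low bits, e.g. RDMA_PS_TCP port 80 yields
 * service ID (RDMA_PS_TCP << 16) + 80.
 */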
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
}
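/*
 * The ib_cm can match incoming REQs against a data/mask pair applied to
 * the private data header built above, letting several rdma_cm listeners
 * share one IB service ID while binding to different destination IPs.
 */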
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_enable_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;

	listen_id = cm_id->context;
	if (cma_disable_remove(listen_id, CMA_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		cma_enable_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
	event.param.conn.responder_resources = attr.max_qp_rd_atom;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_enable_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_enable_remove(listen_id);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
		       "listening on device %s", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
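/*
 * For IPv4 the TOS set via rdma_set_service_type() is carried in the path
 * record's QoS class; for IPv6 the traffic class is taken from the source
 * address's sin6_flowinfo instead.
 */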
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_addr *addr = &id_priv->id.route.addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
	ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	if (addr->src_addr.sa_family == AF_INET) {
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
	} else {
		sin6 = (struct sockaddr_in6 *) &addr->src_addr;
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
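/*
 * Binding to a wildcard or loopback address gives no device hint, so pick
 * the first active port on any device (falling back to the first device)
 * to anchor the id.
 */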
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_enable_remove(id_priv);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
*ps
, struct rdma_id_private
*id_priv
)
1914 struct rdma_bind_list
*bind_list
;
1915 int port
, ret
, low
, high
;
1917 bind_list
= kzalloc(sizeof *bind_list
, GFP_KERNEL
);
1922 /* FIXME: add proper port randomization per like inet_csk_get_port */
1924 ret
= idr_get_new_above(ps
, bind_list
, next_port
, &port
);
1925 } while ((ret
== -EAGAIN
) && idr_pre_get(ps
, GFP_KERNEL
));
1930 inet_get_local_port_range(&low
, &high
);
1932 if (next_port
!= low
) {
1933 idr_remove(ps
, port
);
1937 ret
= -EADDRNOTAVAIL
;
1944 next_port
= port
+ 1;
1947 bind_list
->port
= (unsigned short) port
;
1948 cma_bind_port(bind_list
, id_priv
);
1951 idr_remove(ps
, port
);
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	case RDMA_PS_IPOIB:
		ps = &ipoib_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
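/*
 * Build the wire header carried in the CM private data.  Only IPv4 is
 * formatted here, matching the AF_INET-only check in rdma_bind_addr().
 */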
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_remove(id_priv, CMA_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		if (id_priv->qkey != rep->qkey) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = -EINVAL;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_enable_remove(id_priv);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
2346 static int cma_accept_ib(struct rdma_id_private
*id_priv
,
2347 struct rdma_conn_param
*conn_param
)
2349 struct ib_cm_rep_param rep
;
2350 struct ib_qp_attr qp_attr
;
2351 int qp_attr_mask
, ret
;
2353 if (id_priv
->id
.qp
) {
2354 ret
= cma_modify_qp_rtr(id_priv
);
2358 qp_attr
.qp_state
= IB_QPS_RTS
;
2359 ret
= ib_cm_init_qp_attr(id_priv
->cm_id
.ib
, &qp_attr
,
2364 qp_attr
.max_rd_atomic
= conn_param
->initiator_depth
;
2365 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
2370 memset(&rep
, 0, sizeof rep
);
2371 rep
.qp_num
= id_priv
->qp_num
;
2372 rep
.starting_psn
= id_priv
->seq_num
;
2373 rep
.private_data
= conn_param
->private_data
;
2374 rep
.private_data_len
= conn_param
->private_data_len
;
2375 rep
.responder_resources
= conn_param
->responder_resources
;
2376 rep
.initiator_depth
= conn_param
->initiator_depth
;
2377 rep
.failover_accepted
= 0;
2378 rep
.flow_control
= conn_param
->flow_control
;
2379 rep
.rnr_retry_count
= conn_param
->rnr_retry_count
;
2380 rep
.srq
= id_priv
->srq
? 1 : 0;
2382 ret
= ib_send_cm_rep(id_priv
->cm_id
.ib
, &rep
);
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
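/*
 * Illustrative sketch, not part of this module: a passive-side consumer
 * usually calls rdma_accept() from its RDMA_CM_EVENT_CONNECT_REQUEST
 * handler, echoing the limits carried in the event.  Note that rdma_accept()
 * above already rejects the connection itself when the accept fails, so no
 * explicit rdma_reject() is needed on that path.
 */
static int example_handle_connect_request(struct rdma_cm_id *new_id,
					  struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param;

	/* A consumer that chooses not to serve the request would instead
	 * call rdma_reject(new_id, NULL, 0) and destroy new_id. */
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = event->param.conn.responder_resources;
	conn_param.initiator_depth = event->param.conn.initiator_depth;

	return rdma_accept(new_id, &conn_param);
}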
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
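/*
 * Illustrative sketch, not part of this module: rdma_notify() exists for
 * consumers that see asynchronous QP events directly, e.g. receiving
 * IB_EVENT_COMM_EST when data arrives before connection establishment has
 * completed.  Stashing the cm id as the QP event-handler context, as shown,
 * is an assumed consumer convention.
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	struct rdma_cm_id *id = context;	/* assumed set at QP creation */

	if (event->event == IB_EVENT_COMM_EST)
		rdma_notify(id, IB_EVENT_COMM_EST);
}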
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (cma_is_ud_ps(id->ps))
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_has_cm_dev(id_priv))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
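/*
 * Illustrative sketch, not part of this module: either side may initiate
 * teardown.  The remote peer then observes RDMA_CM_EVENT_DISCONNECTED and
 * typically mirrors the call before destroying its QP and id (the QP must
 * be destroyed before the id that owns it).
 */
static void example_teardown(struct rdma_cm_id *id)
{
	rdma_disconnect(id);	/* DREQ/DREP exchange on IB, close on iWARP */
	rdma_destroy_qp(id);
	rdma_destroy_id(id);
}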
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
		return 0;

	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 multicast->rec.mlid);
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_enable_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	cma_enable_remove(id_priv);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
		    0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		/* Map the IPv4 group to an IPoIB hardware address; the MGID
		 * is the 16 bytes following the 4-byte flags/QPN prefix. */
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
	if (id_priv->id.ps == RDMA_PS_UDP)
		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
	ib_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	if (IS_ERR(mc->multicast.ib))
		return PTR_ERR(mc->multicast.ib);

	return 0;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, ip_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_join_ib_multicast(id_priv, mc);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
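/*
 * Illustrative sketch, not part of this module: a UD consumer joins with a
 * bound or resolved id and then waits for RDMA_CM_EVENT_MULTICAST_JOIN,
 * whose ud parameters carry everything needed to address the group.  The
 * handler below is an assumed consumer pattern, not a required one.
 */
static int example_join(struct rdma_cm_id *id, struct sockaddr *mc_addr)
{
	/* The context argument is echoed back to the event handler as
	 * event->param.ud.private_data. */
	return rdma_join_multicast(id, mc_addr, NULL);
}

static int example_mc_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
		/* event->param.ud.ah_attr, .qp_num (0xFFFFFF for multicast)
		 * and .qkey may now be used to post sends to the group. */
		break;
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		/* The consumer would call rdma_leave_multicast() here with
		 * the same address it passed to rdma_join_multicast(). */
		break;
	default:
		break;
	}
	return 0;
}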
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						mc->multicast.ib->rec.mlid);
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_init(void)
{
	int ret, low, high, remaining;

	get_random_bytes(&next_port, sizeof next_port);
	inet_get_local_port_range(&low, &high);
	remaining = (high - low) + 1;
	next_port = ((unsigned int) next_port % remaining) + low;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
}
module_init(cma_init);
module_exit(cma_cleanup);