/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
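
/*
 * Usage sketch (not part of the original file): a ULP event handler can
 * turn event codes into readable strings with rdma_event_msg().  The
 * handler and context names below are hypothetical.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		pr_info("cma event %s, status %d\n",
 *			rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 */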
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);
bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}
EXPORT_SYMBOL(rdma_is_consumer_reject);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
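
/*
 * Usage sketch (hypothetical caller): on RDMA_CM_EVENT_REJECTED, a ULP can
 * check whether the peer issued a consumer reject and read its payload:
 *
 *	const void *data;
 *	u8 len;
 *
 *	data = rdma_consumer_reject_data(id, event, &len);
 *	if (data)
 *		pr_info("peer rejected us with %u private bytes\n", len);
 */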
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
struct cma_pernet {
	struct idr tcp_ps;
	struct idr udp_ps;
	struct idr ipoib_ps;
	struct idr ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static struct idr *cma_pernet_idr(struct net *net, enum rdma_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_port_space	ps;
	struct hlist_head	owners;
	unsigned short		port;
};

struct class_port_info_context {
	struct ib_class_port_info	*class_port_info;
	struct ib_device		*device;
	struct completion		done;
	struct ib_sa_query		*sa_query;
	u8				port_num;
};
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_port_space ps, int snum)
{
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);
}
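
/*
 * Sketch of how the port-space helpers above fit together (values are
 * illustrative; callers hold "lock" as the real binding code does): a
 * bind list stored under a port number is found again by cma_ps_find()
 * and dropped by cma_ps_remove() once its owners list empties.
 *
 *	ret = cma_ps_alloc(net, RDMA_PS_TCP, bind_list, 9999);
 *	...
 *	bind_list = cma_ps_find(net, RDMA_PS_TCP, 9999);
 *	cma_ps_remove(net, RDMA_PS_TCP, 9999);
 */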
void cma_ref_dev(struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_ref_dev(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list; /* listen_any_list or cma_device.list */
	struct list_head	listen_list; /* per device listens */
	struct cma_device	*cma_dev;
	struct list_head	mc_list;

	int			internal_id;
	enum rdma_cm_state	state;
	spinlock_t		lock;
	struct mutex		qp_mutex;

	struct completion	comp;
	atomic_t		refcount;
	struct mutex		handler_mutex;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qkey;
	u32			qp_num;
	pid_t			owner;
	u32			options;
	u8			srq;
	u8			tos;
	bool			tos_set;
	u8			reuseaddr;
	u8			afonly;
	enum ib_gid_type	gid_type;
};
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
	bool			igmp_joined;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	 work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00
struct cma_req_info {
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
	__be64 service_id;
	u16 pkey;
	bool has_gid;
};
static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
{
	unsigned long flags;
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
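
/*
 * Sketch of how the helpers above are used: state transitions are a
 * compare-and-exchange on id_priv->state under id_priv->lock, e.g. a
 * bind moving an id from IDLE to ADDR_BOUND:
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 *		return -EINVAL;
 */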
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
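
/*
 * Worked example: the ip_version byte carries the IP version in bits 7:4,
 * so for an IPv4 header cma_set_ip_ver(hdr, 4) stores 0x40 and
 * cma_get_ip_ver() recovers 4 (0x40 >> 4).
 */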
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}
static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}
static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr, NULL);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}
static inline int cma_validate_port(struct ib_device *device, u8 port,
				    enum ib_gid_type gid_type,
				    union ib_gid *gid, int dev_type,
				    int bound_if_index)
{
	int ret = -ENODEV;
	struct net_device *ndev = NULL;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ret;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ret;

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
		ndev = dev_get_by_index(&init_net, bound_if_index);
	else
		gid_type = IB_GID_TYPE_IB;

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
					 ndev, NULL);

	if (ndev)
		dev_put(ndev);

	return ret;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	mutex_lock(&lock);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		gidp = rdma_protocol_roce(cma_dev->device, port) ?
		       &iboe_gid : &gid;

		ret = cma_validate_port(cma_dev->device, port,
					rdma_protocol_ib(cma_dev->device, port) ?
					IB_GID_TYPE_IB :
					listen_id_priv->gid_type, gidp,
					dev_addr->dev_type,
					dev_addr->bound_dev_if);
		if (!ret) {
			id_priv->id.port_num = port;
			goto out;
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						IB_GID_TYPE_IB :
						cma_dev->default_gid_type[port - 1],
						gidp, dev_addr->dev_type,
						dev_addr->bound_dev_if);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}
/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	enum ib_port_state port_state;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;
			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
						       &gid, NULL);
			     i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);

	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

struct rdma_cm_id *rdma_create_id(struct net *net,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
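
/*
 * Usage sketch (hypothetical ULP): creating a connection identifier for
 * reliable-connected use in the TCP port space:
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 */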
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
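
/*
 * Usage sketch (hypothetical ULP, fields abbreviated): once the route is
 * resolved, a caller typically allocates a QP bound to the id's device:
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = cq, .recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IB_QPT_RC,
 *	};
 *
 *	ret = rdma_create_qp(id, pd, &attr);
 */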
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
			   &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
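
/*
 * Usage sketch: a ULP that manages its own QP can drive the
 * INIT/RTR/RTS transitions itself, querying the required attributes
 * per state:
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */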
static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}
static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}
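
/*
 * Worked example (illustrative values): for AF_IB the port lives in the
 * low bits of the service ID, selected by sib_sid_mask.  A sid of
 * 0x0000000001069999 with a mask of 0xffff yields htons(0x9999).
 */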
static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}
static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}
static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}
static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}
static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_iif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}
static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
					  const struct cma_req_info *req)
{
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
			*src_addr = (struct sockaddr *)&src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
					   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		dev_put(net_dev);
		return ERR_PTR(-EHOSTUNREACH);
	}

	return net_dev;
}
static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}
static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);
}
static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      u8 port_num)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request or a RoCE request */
		return (!id->port_num || id->port_num == port_num) &&
		       (addr->src_addr.ss_family == AF_IB ||
			cma_protocol_roce_dev_port(id->device, port_num));

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->ifindex);
}
static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id	  *cm_id,
		const struct ib_cm_event  *ib_event,
		const struct cma_req_info *req,
		const struct net_device   *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}
static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
						 struct ib_cm_event *ib_event,
						 struct net_device **net_dev)
{
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, &req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
			/* TODO find the net dev matching the request parameters
			 * through the RoCE GID table */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}
static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
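
/*
 * Layout note: for the IP-based port spaces the first
 * sizeof(struct cma_hdr) bytes of CM private data carry the cma_hdr
 * built by the active side, so consumer private data begins at this
 * offset; AF_IB carries no header and the offset is zero.
 */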
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
					&id_priv->id.route.addr.dev_addr;
				struct net_device *ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(&init_net,
								dev_addr->bound_dev_if);
				if (ndev) {
					cma_igmp_send(ndev,
						      &mc->multicast.ib->rec.mgid,
						      false);
					dev_put(ndev);
				}
			}
			kref_put(&mc->mcref, release_mc);
		}
	}
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_DISCONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
		    (id_priv->id.qp_type != IB_QPT_UD))
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
										ib_event->param.rej_rcvd.reason));
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event,
					       struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
	const __be64 service_id =
		ib_event->param.req_rcvd.primary_path->service_id;
	int ret;

	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
			    listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event,
					      struct net_device *net_dev)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	int ret;

	id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))
		goto err;

	if (net_dev) {
		rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
	} else {
		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);
			if (ret)
				goto err;
		}
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct net_device *net_dev;
	int offset, ret;

	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err1;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	if (net_dev)
		dev_put(net_dev);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);

net_dev_put:
	if (net_dev)
		dev_put(net_dev);

	return ret;
}
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
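
/*
 * Worked example (illustrative values): for RDMA_PS_TCP (0x0106) and a
 * bound port of 5000 (0x1388), the service ID is
 * cpu_to_be64((0x0106ULL << 16) + 0x1388) == cpu_to_be64(0x01061388).
 */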
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}
*id_priv
, int backlog
)
2206 struct iw_cm_id
*id
;
2208 id
= iw_create_cm_id(id_priv
->id
.device
,
2209 iw_conn_req_handler
,
2214 id
->tos
= id_priv
->tos
;
2215 id_priv
->cm_id
.iw
= id
;
2217 memcpy(&id_priv
->cm_id
.iw
->local_addr
, cma_src_addr(id_priv
),
2218 rdma_addr_size(cma_src_addr(id_priv
)));
2220 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
2223 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
2224 id_priv
->cm_id
.iw
= NULL
;
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
			    id_priv->id.qp_type);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
			ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
}
EXPORT_SYMBOL(rdma_set_service_type);
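
/*
 * Usage sketch: a ULP sets the type of service before resolving a route,
 * e.g. rdma_set_service_type(id, 0x10).  Only the low 8 bits are kept,
 * and tos_set records that the user made an explicit choice so the
 * per-port default RoCE ToS does not override it.
 */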
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
		pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
				     status);
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);

	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id,
						  cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (is_vlan_dev(ndev))
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}

static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}

static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if) {
		unsigned long supported_gids;

		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
		if (!ndev) {
			ret = -ENODEV;
			goto err2;
		}

		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
							    id_priv->id.port_num);
		gid_type = cma_route_gid_type(addr->dev_addr.network,
					      supported_gids,
					      id_priv->gid_type);
		route->path_rec->rec_type =
			sa_conv_gid_to_pathrec_type(gid_type);
		sa_path_set_ndev(route->path_rec, &init_net);
		sa_path_set_ifindex(route->path_rec, ndev->ifindex);
	}
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	/* Use the hint from IP Stack to select GID Type */
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);

	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
	route->path_rec->traffic_class = tos;
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

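/*
 * rdma_resolve_route - Resolve the route to the destination address.  The
 * ID transitions from RDMA_CM_ADDR_RESOLVED to RDMA_CM_ROUTE_QUERY, and
 * the actual work is dispatched per transport: an asynchronous SA path
 * record query for IB, a locally constructed path record for RoCE, and a
 * no-op work item for iWARP.
 */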
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	union ib_gid gid;
	enum ib_port_state port_state;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
			    port_state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev) {
		status = cma_acquire_dev(id_priv, NULL);
		if (status)
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
					     status);
	} else if (status) {
		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
	}

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (IS_ENABLED(CONFIG_IPV6) &&
		    dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}

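/*
 * rdma_resolve_addr - Map a destination IP address to a device address.
 * An idle ID is first bound to a matching source address.  A wildcard
 * destination resolves over loopback, an AF_IB destination is taken
 * directly from the sockaddr_ib GID, and everything else goes through the
 * asynchronous rdma_resolve_ip() lookup with addr_handler() as the
 * completion callback.
 */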
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret) {
			memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
			return ret;
		}
	}

	if (cma_family(id_priv) != dst_addr->sa_family) {
		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
		return -EINVAL;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
		memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
		return -EINVAL;
	}

	atomic_inc(&id_priv->refcount);
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

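/*
 * rdma_set_reuseaddr - Allow the port bound to this ID to be shared with
 * other IDs, similar to SO_REUSEADDR.  Setting the flag is always
 * permitted; clearing it is only allowed while the ID is still idle.
 */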
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

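/*
 * rdma_set_afonly - Restrict the ID to its bound address family, analogous
 * to the IPV6_V6ONLY socket option.  Only permitted while the ID is idle
 * or merely address-bound, i.e. before it starts listening.
 */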
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);

static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *daddr = cma_dst_addr(id_priv);
	struct sockaddr *saddr = cma_src_addr(id_priv);
	__be16 dport = cma_port(daddr);

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
		__be16 cur_dport = cma_port(cur_daddr);

		if (id_priv == cur_id)
			continue;

		/* different dest port -> unique */
		if (!cma_any_port(cur_daddr) &&
		    (dport != cur_dport))
			continue;

		/* different src address -> unique */
		if (!cma_any_addr(saddr) &&
		    !cma_any_addr(cur_saddr) &&
		    cma_addr_cmp(saddr, cur_saddr))
			continue;

		/* different dst address -> unique */
		if (!cma_any_addr(cur_daddr) &&
		    cma_addr_cmp(daddr, cur_daddr))
			continue;

		return -EADDRNOTAVAIL;
	}
	return 0;
}

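/*
 * Pick an ephemeral port for the ID: probe a random starting point inside
 * the local port range and advance linearly, skipping the most recently
 * released port.  A port that is already bound may still be shared when
 * cma_port_is_unique() finds no conflicting address/port tuple.
 */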
static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover) {
		struct rdma_bind_list *bind_list;
		int ret;

		bind_list = cma_ps_find(net, ps, (unsigned short)rover);

		if (!bind_list) {
			ret = cma_alloc_port(ps, id_priv, rover);
		} else {
			ret = cma_port_is_unique(bind_list, id_priv);
			if (!ret)
				cma_bind_port(bind_list, id_priv);
		}
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}

/*
 * Check that the requested port is available.  This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port.  In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}

static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}

static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

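/*
 * rdma_listen - Start listening for connection requests.  An idle ID is
 * implicitly bound to an IPv4 wildcard address first.  With a bound device
 * the listen is handed to the IB or iWARP CM directly; a wildcard ID
 * instead listens across every RDMA device via cma_listen_on_all().
 */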
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

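/*
 * rdma_bind_addr - Bind the ID to a source address and, for a non-wildcard
 * address, to the RDMA device that owns it.  This also claims a port in
 * the ID's port space and records the address family for the destination.
 */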
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;
	struct sockaddr *daddr;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	daddr = cma_dst_addr(id_priv);
	daddr->sa_family = addr->sa_family;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}

static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
					     event.status);
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id	*id;
	void *private_data;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}

static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

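/*
 * rdma_connect - Initiate an active connection request.  UD QPs on IB
 * transports use a SIDR request to resolve the remote QP number and QKey
 * instead of a full connection, while connected QPs send an IB CM REQ or
 * an iWARP connect request.
 */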
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	if (!conn_param)
		return -EINVAL;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

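/*
 * rdma_accept - Accept a connection request on a passively created ID.
 * For UD QPs a SIDR reply is returned; otherwise the IB REP or iWARP
 * accept is sent.  On failure the QP is moved to the error state and the
 * request is rejected so the initiator does not wait for a reply.
 */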
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

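/*
 * rdma_reject - Decline an incoming connection request, optionally
 * carrying private data back to the initiator.  UD requests are answered
 * with a SIDR reject, connected ones with a consumer-defined CM reject.
 */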
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);

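/*
 * rdma_disconnect - Tear down an established connection.  On IB the QP is
 * first moved to the error state and a DREQ is sent (or a DREP if the
 * peer disconnected first); on iWARP the disconnect is delegated to the
 * iWARP CM.
 */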
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);

static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret = 0;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	else
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp) {
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
		if (status)
			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
					     status);
	}
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 ndev, gid_type,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}

static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
	    (!ib_sa_sendonly_fullmem_support(&sa_client,
					     id_priv->id.device,
					     id_priv->id.port_num))) {
		pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
			"RDMA CM: SM doesn't support Send Only Full Member option\n",
			id_priv->id.device->name, id_priv->id.port_num);
		return -EOPNOTSUPP;
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}

static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
		mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}

static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
				if (!err)
					mc->igmp_joined = true;
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->igmp_joined = false;
	mc->join_state = join_state;
	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);

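/*
 * rdma_leave_multicast - Leave a previously joined multicast group and
 * detach the QP if one is attached.  IB memberships are released at the
 * SA; RoCE memberships drop the local reference and send an IGMP leave
 * if one was sent on join.
 */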
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				if (mc->igmp_joined) {
					struct rdma_dev_addr *dev_addr =
						&id->route.addr.dev_addr;
					struct net_device *ndev = NULL;

					if (dev_addr->bound_dev_if)
						ndev = dev_get_by_index(&init_net,
									dev_addr->bound_dev_if);
					if (ndev) {
						cma_igmp_send(ndev,
							      &mc->multicast.ib->rec.mgid,
							      false);
						dev_put(ndev);
					}
					mc->igmp_joined = false;
				}
				kref_put(&mc->mcref, release_mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);

static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type)
		goto free_cma_dev;

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos)
		goto free_gid_type;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				CMA_PREFERRED_ROCE_GID_TYPE;
		else
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);

	return;

free_gid_type:
	kfree(cma_dev->default_gid_type);

free_cma_dev:
	kfree(cma_dev);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}

static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
			nlmsg_end(skb, nlh);
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};

static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_init(&pernet->tcp_ps);
	idr_init(&pernet->udp_ps);
	idr_init(&pernet->ipoib_ps);
	idr_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	idr_destroy(&pernet->tcp_ps);
	idr_destroy(&pernet->udp_ps);
	idr_destroy(&pernet->ipoib_ps);
	idr_destroy(&pernet->ib_ps);
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};

static int __init cma_init(void)
{
	int ret;

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	rdma_nl_register(RDMA_NL_RDMA_CM, cma_cb_table);
	cma_configfs_init();

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	rdma_nl_unregister(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_RDMA_CM, 1);

module_init(cma_init);
module_exit(cma_cleanup);