// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
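
/*
 * Note: CMA_CM_RESPONSE_TIMEOUT follows the IB CM timeout encoding, where
 * the effective wait is roughly 4.096 usec << value, so 20 corresponds to a
 * few seconds per CM message exchange before one of CMA_MAX_CM_RETRIES
 * retries is attempted.
 */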

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *			     request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

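/*
 * Each network namespace gets its own set of port-space tables; they are
 * reached through the generic per-net pointer registered under
 * cma_pernet_id.
 */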
static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	refcount_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head	owners;
	unsigned short		port;
};

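/*
 * A bind list ties one port number within a port space to all of the
 * rdma_id_privates sharing that port; the list itself is stored in the
 * per-net xarray keyed by port number.
 */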
struct class_port_info_context {
	struct ib_class_port_info	*class_port_info;
	struct ib_device		*device;
	struct completion		done;
	struct ib_sa_query		*sa_query;
	u8				port_num;
};

static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	xa_erase(xa, snum);
}

void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *ib;
	} multicast;
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	struct kref		mcref;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

struct cma_ndev_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	struct rdma_cm_event	event;
};

struct iboe_mcast_work {
	struct work_struct	 work;
	struct rdma_id_private	*id;
	struct cma_multicast	*mc;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * away from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

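/*
 * The cma_hdr ip_version octet packs the IP version into bits 7:4; the
 * helpers below read and write that nibble while preserving the low bits.
 */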
static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
	if (id_priv->res.kern_name)
		rdma_restrack_kadd(&id_priv->res);
	else
		rdma_restrack_uadd(&id_priv->res);
	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

static inline void release_mc(struct kref *kref)
{
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);
	kfree(mc);
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

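/*
 * QKEY resolution for datagram style IDs: an explicitly supplied qkey wins,
 * RDMA_PS_UDP and RDMA_PS_IB fall back to RDMA_UDP_QKEY, and RDMA_PS_IPOIB
 * takes the qkey from the multicast member record of the attached group.
 */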
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}

	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret = 0;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
	}

	return ret;
}

static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u8 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int bound_if_index = dev_addr->bound_dev_if;
	const struct ib_gid_attr *sgid_attr;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;

	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		return ERR_PTR(-ENODEV);

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		if (!ndev)
			return ERR_PTR(-ENODEV);
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	if (ndev)
		dev_put(ndev);
	return sgid_attr;
}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	unsigned int port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	return 0;
}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	unsigned int port;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);

	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}

out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p,
						     &port_state))
				continue;
			for (i = 0; !rdma_query_gid(cur_dev->device,
						    p, i, &gid);
			     i++) {
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev) {
		mutex_unlock(&lock);
		return -ENODEV;
	}

found:
	cma_attach_to_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

struct rdma_cm_id *__rdma_create_id(struct net *net,
				    rdma_cm_event_handler event_handler,
				    void *context, enum rdma_ucm_port_space ps,
				    enum ib_qp_type qp_type, const char *caller)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	rdma_restrack_set_task(&id_priv->res, caller);
	id_priv->res.type = RDMA_RESTRACK_CM_ID;
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;

	return &id_priv->id;
}
EXPORT_SYMBOL(__rdma_create_id);

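/*
 * UD QPs have no connection handshake, so cma_init_ud_qp() drives the QP
 * straight through INIT -> RTR -> RTS; only the SQ PSN is needed for the
 * final transition.
 */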
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device) {
		ret = -EINVAL;
		goto out_err;
	}

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out_err;
	}

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto out_destroy;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
	return 0;
out_destroy:
	ib_destroy_qp(qp);
out_err:
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

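/*
 * A minimal caller sketch (hypothetical names; assumes an already resolved
 * cm_id and a PD allocated on the same device, with CQs and capabilities
 * filled in and error handling elided):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type  = IB_QPT_RC,
 *		.send_cq  = send_cq,
 *		.recv_cq  = recv_cq,
 *		.cap      = { .max_send_wr = 16, .max_recv_wr = 16,
 *			      .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *	if (rdma_create_qp(cm_id, pd, &init_attr))
 *		goto fail;
 */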
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	trace_cm_qp_destroy(id_priv);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

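/*
 * Moving a connected QP to RTR happens in two steps: first re-apply the
 * INIT attributes (the QP may have been created outside rdma_create_qp()
 * with default values), then query and apply the RTR attributes derived
 * from the resolved path.
 */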
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else
		ret = -ENOSYS;

	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline bool cma_zero_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_any_addr(const struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

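/*
 * cma_addr_cmp() returns zero when the two addresses match (including the
 * scope id for IPv6 link-local addresses) and non-zero otherwise.
 */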
static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
	case AF_INET6: {
		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
		bool link_local;

		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
			return 1;
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
			     IPV6_ADDR_LINKLOCAL;
		/* Link local must match their scope_ids */
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) : 0;
	}

	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(const struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(const struct sockaddr *addr)
{
	return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}

static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device	= req_param->listen_id->device;
		req->port	= req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid	= true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device	= sidr_param->listen_id->device;
		req->port	= sidr_param->port;
		req->has_gid	= false;
		req->service_id	= sidr_param->service_id;
		req->pkey	= sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_iif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 NULL, strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}

static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}

static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
	const struct ib_gid_attr *sgid_attr = NULL;
	struct net_device *ndev;

	if (ib_event->event == IB_CM_REQ_RECEIVED)
		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;

	if (!sgid_attr)
		return NULL;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
	if (IS_ERR(ndev))
		ndev = NULL;
	else
		dev_hold(ndev);
	rcu_read_unlock();
	return ndev;
}

static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
					  struct cma_req_info *req)
{
	struct sockaddr *listen_addr =
			(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	return net_dev;
}

static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

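/*
 * The service ID encodes the port space in bits 16..31 and the port number
 * in the low 16 bits, mirroring the layout built by rdma_get_service_id().
 */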
static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return rdma_protocol_roce(device, port_num);
}

static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{
	const struct sockaddr *daddr =
			(const struct sockaddr *)&req->listen_addr_storage;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;

	/* Returns true if the req is for IPv6 link local */
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);

	/*
	 * If the request is not for IPv6 link local, allow matching
	 * request to any netdevice of the one or multiport rdma device.
	 */
	if (!cma_is_req_ipv6_ll(req))
		return true;
	/*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
	 */
	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    (!!addr->dev_addr.bound_dev_if ==
	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
		return true;
	else
		return false;
}

static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	lockdep_assert_held(&lock);

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_list) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id,
						      net_dev, req))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
		     const struct ib_cm_event *ib_event,
		     struct cma_req_info *req,
		     struct net_device **net_dev)
{
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	mutex_lock(&lock);
	/*
	 * Net namespace might be getting deleted while route lookup,
	 * cm_id lookup is in progress. Therefore, perform netdevice
	 * validation, cm_id lookup under rcu lock.
	 * RCU lock along with netdevice state check, synchronizes with
	 * netdevice migrating to different net namespace and also avoids
	 * case where net namespace doesn't get deleted while lookup is in
	 * progress.
	 * If the device state is not IFF_UP, its properties such as ifindex
	 * and nd_net cannot be trusted to remain valid without rcu lock.
	 * net/core/dev.c change_net_namespace() ensures to synchronize with
	 * ongoing operations on net device after device is closed using
	 * synchronize_net().
	 */
	rcu_read_lock();
	if (*net_dev) {
		/*
		 * If netdevice is down, it is likely that it is administratively
		 * down or it might be migrating to different namespace.
		 * In that case avoid further processing, as the net namespace
		 * or ifindex may change.
		 */
		if (((*net_dev)->flags & IFF_UP) == 0) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}

		if (!validate_net_dev(*net_dev,
				 (struct sockaddr *)&req->listen_addr_storage,
				 (struct sockaddr *)&req->src_addr_storage)) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req->service_id),
				cma_port_from_service_id(req->service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
	rcu_read_unlock();
	mutex_unlock(&lock);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}
	return id_priv;
}

cma_user_data_offset(struct rdma_id_private
*id_priv
)
1712 return cma_family(id_priv
) == AF_IB
? 0 : sizeof(struct cma_hdr
);
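/*
 * AF_IB requests carry no cma_hdr, so consumer private data starts at
 * offset zero; IP-based requests must skip the header first.
 */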
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

*id_priv
)
1769 struct rdma_bind_list
*bind_list
= id_priv
->bind_list
;
1770 struct net
*net
= id_priv
->id
.route
.addr
.dev_addr
.net
;
1776 hlist_del(&id_priv
->node
);
1777 if (hlist_empty(&bind_list
->owners
)) {
1778 cma_ps_remove(net
, bind_list
->ps
, bind_list
->port
);
1781 mutex_unlock(&lock
);
static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
				    struct cma_multicast *mc)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct net_device *ndev = NULL;

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (ndev) {
		cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
		dev_put(ndev);
	}
	kref_put(&mc->mcref, release_mc);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
		} else {
			cma_leave_roce_mc_group(id_priv, mc);
		}
	}
}

static void _destroy_id(struct rdma_id_private *id_priv,
			enum rdma_cm_state state)
{
	cma_cancel_operation(id_priv, state);

	rdma_restrack_del(&id_priv->res);
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_id_put(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_id_put(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);

	if (id_priv->id.route.addr.dev_addr.sgid_attr)
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);

	put_net(id_priv->id.route.addr.dev_addr.net);
	kfree(id_priv);
}

/*
 * destroy an ID from within the handler_mutex. This ensures that no other
 * handlers can start running concurrently.
 */
static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
	__releases(&id_priv->handler_mutex)
{
	enum rdma_cm_state state;
	unsigned long flags;

	trace_cm_id_destroy(id_priv);

	/*
	 * Setting the state to destroyed under the handler mutex provides a
	 * fence against calling handler callbacks. If this is invoked due to
	 * the failure of a handler callback then it guarantees that no future
	 * handlers will be called.
	 */
	lockdep_assert_held(&id_priv->handler_mutex);
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	id_priv->state = RDMA_CM_DESTROYING;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	mutex_unlock(&id_priv->handler_mutex);
	_destroy_id(id_priv, state);
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
	destroy_id_handler_unlock(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

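/*
 * Reply handling on the active side: transition the QP to RTR and then
 * RTS and acknowledge with an RTU; on any failure the QP is moved to the
 * error state and a consumer-defined REJ is sent back to the peer.
 */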
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	trace_cm_send_rtu(id_priv);
	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
	cma_modify_qp_err(id_priv);
	trace_cm_send_rej(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

*event
,
1916 const struct ib_cm_rep_event_param
*rep_data
,
1919 event
->param
.conn
.private_data
= private_data
;
1920 event
->param
.conn
.private_data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
1921 event
->param
.conn
.responder_resources
= rep_data
->responder_resources
;
1922 event
->param
.conn
.initiator_depth
= rep_data
->initiator_depth
;
1923 event
->param
.conn
.flow_control
= rep_data
->flow_control
;
1924 event
->param
.conn
.rnr_retry_count
= rep_data
->rnr_retry_count
;
1925 event
->param
.conn
.srq
= rep_data
->srq
;
1926 event
->param
.conn
.qp_num
= rep_data
->remote_qpn
;
1928 event
->ece
.vendor_id
= rep_data
->ece
.vendor_id
;
1929 event
->ece
.attr_mod
= rep_data
->ece
.attr_mod
;
static int cma_cm_event_handler(struct rdma_id_private *id_priv,
				struct rdma_cm_event *event)
{
	int ret;

	lockdep_assert_held(&id_priv->handler_mutex);

	trace_cm_event_handler(id_priv, event);
	ret = id_priv->id.event_handler(&id_priv->id, event);
	trace_cm_event_done(id_priv, event, ret);
	return ret;
}

static int cma_ib_handler(struct ib_cm_id *cm_id,
			  const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event = {};
	enum rdma_cm_state state;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	state = READ_ONCE(id_priv->state);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     state != RDMA_CM_DISCONNECT))
		goto out;

	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (state == RDMA_CM_CONNECT &&
		    (id_priv->id.qp_type != IB_QPT_UD)) {
			trace_cm_send_mra(id_priv);
			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
		}
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT;
		fallthrough;
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
										ib_event->param.rej_rcvd.reason));
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_cm_event_handler(id_priv, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}

static struct rdma_id_private *
cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
		   const struct ib_cm_event *ib_event,
		   struct net_device *net_dev)
{
	struct rdma_id_private *listen_id_priv;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
	const __be64 service_id =
		ib_event->param.req_rcvd.primary_path->service_id;
	int ret;

	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
	id = __rdma_create_id(listen_id->route.addr.dev_addr.net,
			      listen_id->event_handler, listen_id->context,
			      listen_id->ps, ib_event->param.req_rcvd.qp_type,
			      listen_id_priv->res.kern_name);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
				     GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (net_dev) {
		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
	} else {
		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
			if (ret)
				goto err;
		}
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

*
2095 cma_ib_new_udp_id(const struct rdma_cm_id
*listen_id
,
2096 const struct ib_cm_event
*ib_event
,
2097 struct net_device
*net_dev
)
2099 const struct rdma_id_private
*listen_id_priv
;
2100 struct rdma_id_private
*id_priv
;
2101 struct rdma_cm_id
*id
;
2102 const sa_family_t ss_family
= listen_id
->route
.addr
.src_addr
.ss_family
;
2103 struct net
*net
= listen_id
->route
.addr
.dev_addr
.net
;
2106 listen_id_priv
= container_of(listen_id
, struct rdma_id_private
, id
);
2107 id
= __rdma_create_id(net
, listen_id
->event_handler
, listen_id
->context
,
2108 listen_id
->ps
, IB_QPT_UD
,
2109 listen_id_priv
->res
.kern_name
);
2113 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2114 if (cma_save_net_info((struct sockaddr
*)&id
->route
.addr
.src_addr
,
2115 (struct sockaddr
*)&id
->route
.addr
.dst_addr
,
2116 listen_id
, ib_event
, ss_family
,
2117 ib_event
->param
.sidr_req_rcvd
.service_id
))
2121 rdma_copy_src_l2_addr(&id
->route
.addr
.dev_addr
, net_dev
);
2123 if (!cma_any_addr(cma_src_addr(id_priv
))) {
2124 ret
= cma_translate_addr(cma_src_addr(id_priv
),
2125 &id
->route
.addr
.dev_addr
);
2131 id_priv
->state
= RDMA_CM_CONNECT
;
2134 rdma_destroy_id(id
);
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   const struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;

	event->ece.vendor_id = req_data->ece.vendor_id;
	event->ece.attr_mod = req_data->ece.attr_mod;
}

static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
				    const struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}

static int cma_ib_req_handler(struct ib_cm_id *cm_id,
			      const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event = {};
	struct cma_req_info req = {};
	struct net_device *net_dev;
	u8 offset;
	int ret;

	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	trace_cm_req_handler(listen_id, ib_event->event);
	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
		goto net_dev_put;
	}

	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err_unlock;
	}

	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
	if (ret) {
		destroy_id_handler_unlock(conn_id);
		goto err_unlock;
	}

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = cma_cm_event_handler(conn_id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		conn_id->cm_id.ib = NULL;
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		goto net_dev_put;
	}

	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
	    conn_id->id.qp_type != IB_QPT_UD) {
		trace_cm_send_mra(cm_id->context);
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	}
	mutex_unlock(&conn_id->handler_mutex);

err_unlock:
	mutex_unlock(&listen_id->handler_mutex);

net_dev_put:
	if (net_dev)
		dev_put(net_dev);

	return ret;
}

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);
void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
		    union ib_gid *dgid)
{
	struct rdma_addr *addr = &cm_id->route.addr;

	if (!cm_id->device) {
		if (sgid)
			memset(sgid, 0, sizeof(*sgid));
		if (dgid)
			memset(dgid, 0, sizeof(*dgid));
		return;
	}

	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
		if (sgid)
			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
		if (dgid)
			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
	} else {
		if (sgid)
			rdma_addr_get_sgid(&addr->dev_addr, sgid);
		if (dgid)
			rdma_addr_get_dgid(&addr->dev_addr, dgid);
	}
}
EXPORT_SYMBOL(rdma_read_gids);
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event = {};
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		goto out;

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		goto out;
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = cma_cm_event_handler(id_priv, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event = {};
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	listen_id = cm_id->context;

	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
		goto out;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				     listen_id->id.event_handler,
				     listen_id->id.context,
				     RDMA_PS_TCP, IB_QPT_RC,
				     listen_id->res.kern_name);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	ret = cma_iw_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = cma_cm_event_handler(conn_id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		mutex_unlock(&listen_id->handler_mutex);
		destroy_id_handler_unlock(conn_id);
		return ret;
	}

	mutex_unlock(&conn_id->handler_mutex);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device,
				 cma_ib_req_handler, svc_id);
	if (IS_ERR(id))
		return PTR_ERR(id);
	id_priv->cm_id.ib = id;

	return 0;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id->tos = id_priv->tos;
	id->tos_set = id_priv->tos_set;
	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	/* Listening IDs are always destroyed on removal */
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		return -1;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	trace_cm_event_handler(id_priv, event);
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	int ret;

	lockdep_assert_held(&lock);

	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
			      id_priv->id.qp_type, id_priv->res.kern_name);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	_cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
	cma_id_get(id_priv);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;
	dev_id_priv->tos_set = id_priv->tos_set;
	dev_id_priv->tos = id_priv->tos;

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		dev_warn(&cma_dev->device->dev,
			 "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
}
EXPORT_SYMBOL(rdma_set_service_type);
/**
 * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
 *			    with a connection identifier.
 * @id: Communication identifier to associate the ack timeout with.
 * @timeout: Ack timeout to set for the QP, expressed as 4.096 * 2^(timeout) usec.
 *
 * This function should be called before rdma_connect() on the active side,
 * and before rdma_accept() on the passive side. It is applicable to the
 * primary path only. The timeout affects the local side of the QP; it is
 * not negotiated with the remote side, and zero disables the timer. If it
 * is set before rdma_resolve_route(), the value is also used to determine
 * the PacketLifeTime for RoCE.
 *
 * Return: 0 for success
 */
int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
{
	struct rdma_id_private *id_priv;

	if (id->qp_type != IB_QPT_RC)
		return -EINVAL;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->timeout = timeout;
	id_priv->timeout_set = true;

	return 0;
}
EXPORT_SYMBOL(rdma_set_ack_timeout);
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = RDMA_CM_ROUTE_QUERY;
		work->new_state = RDMA_CM_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
		pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
				     status);
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv,
			      unsigned long timeout_ms, struct cma_work *work)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);

	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id,
						  cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out_unlock;

	if (cma_cm_event_handler(id_priv, &work->event)) {
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		goto out_free;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
out_free:
	kfree(work);
}
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	if (cma_cm_event_handler(id_priv, &work->event)) {
		cma_id_put(id_priv);
		destroy_id_handler_unlock(id_priv);
		goto out_free;
	}

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	cma_id_put(id_priv);
out_free:
	kfree(work);
}
static void cma_init_resolve_route_work(struct cma_work *work,
					struct rdma_id_private *id_priv)
{
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}
static void enqueue_resolve_addr_work(struct cma_work *work,
				      struct rdma_id_private *id_priv)
{
	/* Balances with cma_id_put() in cma_work_handler */
	cma_id_get(id_priv);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	queue_work(cma_wq, &work->work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
				unsigned long timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
					   unsigned long supported_gids,
					   enum ib_gid_type default_gid)
{
	if ((network_type == RDMA_NETWORK_IPV4 ||
	     network_type == RDMA_NETWORK_IPV6) &&
	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	return default_gid;
}
/*
 * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the path
 * record type based on the GID type. It also sets up the other L2 fields of
 * the path record, including the destination MAC address and netdev ifindex.
 * It returns the netdev of the bound interface for this path record entry.
 */
static struct net_device *
cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
	struct rdma_addr *addr = &route->addr;
	unsigned long supported_gids;
	struct net_device *ndev;

	if (!addr->dev_addr.bound_dev_if)
		return NULL;

	ndev = dev_get_by_index(addr->dev_addr.net,
				addr->dev_addr.bound_dev_if);
	if (!ndev)
		return NULL;

	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
						    id_priv->id.port_num);
	gid_type = cma_route_gid_type(addr->dev_addr.network,
				      supported_gids,
				      id_priv->gid_type);
	/* Use the hint from IP Stack to select GID Type */
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);

	route->path_rec->roce.route_resolved = true;
	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
	return ndev;
}
int rdma_set_ib_path(struct rdma_cm_id *id,
		     struct sa_path_rec *path_rec)
{
	struct rdma_id_private *id_priv;
	struct net_device *ndev;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
		if (!ndev) {
			ret = -ENODEV;
			goto err_free;
		}
		dev_put(ndev);
	}

	id->route.num_paths = 1;
	return 0;

err_free:
	kfree(id->route.path_rec);
	id->route.path_rec = NULL;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_path);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);
	return 0;
}
static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
	struct net_device *dev;

	dev = vlan_dev_real_dev(vlan_ndev);
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
struct iboe_prio_tc_map {
	int input_prio;
	int output_tc;
	bool found;
};

static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
{
	struct iboe_prio_tc_map *map = data;

	if (is_vlan_dev(dev))
		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
	else if (dev->num_tc)
		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
	else
		return 0; /* Continue looking */
	map->found = true;

	/* We are interested only in first level VLAN device, so always
	 * return 1 to stop iterating over next level devices.
	 */
	return 1;
}
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	struct iboe_prio_tc_map prio_tc_map = {};
	int prio = rt_tos2priority(tos);

	/* If VLAN device, get it directly from the VLAN netdev */
	if (is_vlan_dev(ndev))
		return get_vlan_ndev_tc(ndev, prio);

	prio_tc_map.input_prio = prio;
	rcu_read_lock();
	netdev_walk_all_lower_dev_rcu(ndev,
				      get_lower_vlan_dev_tc,
				      &prio_tc_map);
	rcu_read_unlock();
	/* If map is found from lower device, use it; Otherwise
	 * continue with the current netdevice to get priority to tc map.
	 */
	if (prio_tc_map.found)
		return prio_tc_map.output_tc;
	else if (ndev->num_tc)
		return netdev_get_prio_tc_map(ndev, prio);
	else
		return 0;
}
static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
{
	struct sockaddr_in6 *addr6;
	u16 dport, sport;
	u32 hash, fl;

	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
	if ((cma_family(id_priv) != AF_INET6) || !fl) {
		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
		hash = (u32)sport * 31 + dport;
		fl = hash & IB_GRH_FLOWLABEL_MASK;
	}

	return cpu_to_be32(fl);
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev;

	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
					rdma_start_port(id_priv->cma_dev->device)];
	u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
		/* TODO: get the hoplimit from the inet/inet6 device */
		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
	else
		route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
	route->path_rec->traffic_class = tos;
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	/* In case ACK timeout is set, use this value to calculate
	 * PacketLifeTime. As per IBTA 12.7.34,
	 * local ACK timeout = (2 * PacketLifeTime + Local CA's ACK delay).
	 * Assuming a negligible local ACK delay, we can use
	 * PacketLifeTime = local ACK timeout/2
	 * as a reasonable approximation for RoCE networks.
	 */
	route->path_rec->packet_life_time = id_priv->timeout_set ?
		id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;

	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
					 id_priv->id.port_num))
		route->path_rec->flow_label =
			cma_get_roce_udp_flow_label(id_priv);

	cma_init_resolve_route_work(work, id_priv);
	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
	route->num_paths = 0;
err1:
	kfree(work);
	return ret;
}
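/*
 * Worked example for the PacketLifeTime derivation above. This block is
 * illustrative only and compiled out; example_packet_life_time() is a
 * hypothetical helper, not kernel API. Both fields use the IBTA
 * 4.096 * 2^n usec encoding, so halving the local ACK timeout is just
 * subtracting one from the exponent. With rdma_set_ack_timeout(id, 16),
 * packet_life_time becomes 15, i.e. roughly 134 ms instead of the 268 ms
 * ACK timeout.
 */
#if 0
static u8 example_packet_life_time(bool timeout_set, u8 timeout)
{
	/* Mirrors the packet_life_time expression in cma_resolve_iboe_route() */
	return timeout_set ? timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
}
#endif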
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	cma_id_get(id_priv);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_id_put(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	union ib_gid gid;
	enum ib_port_state port_state;
	unsigned int p;
	u16 pkey;
	int ret;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		rdma_for_each_port (cur_dev->device, p) {
			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
			    port_state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event = {};
	struct sockaddr *addr;
	struct sockaddr_storage old_addr;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	/*
	 * Store the previous src address, so that if we fail to acquire
	 * matching rdma device, old address can be restored back, which helps
	 * to cancel the cma listen operation correctly.
	 */
	addr = cma_src_addr(id_priv);
	memcpy(&old_addr, addr, rdma_addr_size(addr));
	memcpy(addr, src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev) {
		status = cma_acquire_dev_by_src_ip(id_priv);
		if (status)
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
					     status);
	} else if (status) {
		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
	}

	if (status) {
		memcpy(addr, &old_addr,
		       rdma_addr_size((struct sockaddr *)&old_addr));
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (cma_cm_event_handler(id_priv, &event)) {
		destroy_id_handler_unlock(id_priv);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	enqueue_resolve_addr_work(work, id_priv);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 const struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (IS_ENABLED(CONFIG_IPV6) &&
		    dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
/*
 * If required, resolve the source address for bind and leave the id_priv in
 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
 * calls made by the ULP: a previously bound ID will not be re-bound, and
 * src_addr is ignored.
 */
static int resolve_prepare_src(struct rdma_id_private *id_priv,
			       struct sockaddr *src_addr,
			       const struct sockaddr *dst_addr)
{
	int ret;

	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
		/* For a well behaved ULP state will be RDMA_CM_IDLE */
		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
		if (ret)
			goto err_dst;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_ADDR_QUERY))) {
			ret = -EINVAL;
			goto err_dst;
		}
	}

	if (cma_family(id_priv) != dst_addr->sa_family) {
		ret = -EINVAL;
		goto err_state;
	}
	return 0;

err_state:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
err_dst:
	memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
	return ret;
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
	if (ret)
		return ret;

	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
					      &id->route.addr.dev_addr,
					      timeout_ms, addr_handler,
					      false, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
	    id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
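/*
 * Worked example for the AF_IB branch above. This block is illustrative
 * only and compiled out; example_fold_port_into_sid() is a hypothetical
 * helper, not kernel API. A listener bound with sid = 0x0000000001060000
 * and sid_mask = 0xffffffffffff0000 that lands on port 4791 (0x12b7) ends
 * up with sid = 0x00000000010612b7 and an all-ones mask, i.e. the port is
 * folded into the low 16 bits of the service ID.
 */
#if 0
static u64 example_fold_port_into_sid(u64 sid, u64 mask, u16 port)
{
	/* Mirrors the sib_sid update in cma_bind_port() */
	return (sid & mask) | (u64)port;
}
#endif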
static int cma_alloc_port(enum rdma_ucm_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	lockdep_assert_held(&lock);

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
			   snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = snum;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_port_is_unique(struct rdma_bind_list *bind_list,
			      struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *daddr = cma_dst_addr(id_priv);
	struct sockaddr *saddr = cma_src_addr(id_priv);
	__be16 dport = cma_port(daddr);

	lockdep_assert_held(&lock);

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
		struct sockaddr *cur_saddr = cma_src_addr(cur_id);
		__be16 cur_dport = cma_port(cur_daddr);

		if (id_priv == cur_id)
			continue;

		/* different dest port -> unique */
		if (!cma_any_port(daddr) &&
		    !cma_any_port(cur_daddr) &&
		    (dport != cur_dport))
			continue;

		/* different src address -> unique */
		if (!cma_any_addr(saddr) &&
		    !cma_any_addr(cur_saddr) &&
		    cma_addr_cmp(saddr, cur_saddr))
			continue;

		/* different dst address -> unique */
		if (!cma_any_addr(daddr) &&
		    !cma_any_addr(cur_daddr) &&
		    cma_addr_cmp(daddr, cur_daddr))
			continue;

		return -EADDRNOTAVAIL;
	}
	return 0;
}
static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;
	struct net *net = id_priv->id.route.addr.dev_addr.net;

	lockdep_assert_held(&lock);

	inet_get_local_port_range(net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover) {
		struct rdma_bind_list *bind_list;
		int ret;

		bind_list = cma_ps_find(net, ps, (unsigned short)rover);

		if (!bind_list) {
			ret = cma_alloc_port(ps, id_priv, rover);
		} else {
			ret = cma_port_is_unique(bind_list, id_priv);
			if (!ret)
				cma_bind_port(bind_list, id_priv);
		}
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	lockdep_assert_held(&lock);

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if (reuseaddr && cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
static int cma_use_port(enum rdma_ucm_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	lockdep_assert_held(&lock);

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}
static enum rdma_ucm_port_space
cma_select_inet_ps(struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}
static enum rdma_ucm_port_space
cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_ucm_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
		/* For a well behaved ULP state will be RDMA_CM_IDLE */
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
					   RDMA_CM_LISTEN)))
			return -EINVAL;
	}

	/*
	 * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable
	 * any more, and has to be unique in the bind list.
	 */
	if (id_priv->reuseaddr) {
		mutex_lock(&lock);
		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
		if (!ret)
			id_priv->reuseaddr = 0;
		mutex_unlock(&lock);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else {
		cma_listen_on_all(id_priv);
	}

	return 0;
err:
	id_priv->backlog = 0;
	/*
	 * All the failure paths that lead here will not allow the req_handler's
	 * to have run.
	 */
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;
	struct sockaddr *daddr;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev_by_src_ip(id_priv);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6) {
			struct net *net = id_priv->id.route.addr.dev_addr.net;

			id_priv->afonly = net->ipv6.sysctl.bindv6only;
		}
#endif
	}
	daddr = cma_dst_addr(id_priv);
	daddr->sa_family = addr->sa_family;

	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	rdma_restrack_del(&id_priv->res);
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event = {};
	const struct ib_cm_sidr_rep_event_param *rep =
				&ib_event->param.sidr_rep_rcvd;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		goto out;

	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
					     event.status);
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_attr_from_path(id_priv->id.device,
					  id_priv->id.port_num,
					  id_priv->id.route.path_rec,
					  &event.param.ud.ah_attr,
					  rep->sgid_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_cm_event_handler(id_priv, &event);

	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		destroy_id_handler_unlock(id_priv);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id	*id;
	void *private_data;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	trace_cm_send_sidr_req(id_priv);
	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
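/*
 * Worked example for req.timeout_ms above. This block is illustrative only
 * and compiled out; example_cm_timeout_ms() is a hypothetical helper, not
 * kernel API. The CM response timeout uses the 4.096 * 2^n usec encoding,
 * and 4.096 us is roughly 2^-8 ms, so 1 << (n - 8) approximates the same
 * interval in milliseconds. With CMA_CM_RESPONSE_TIMEOUT = 20 that is
 * 1 << 12 = 4096 ms, close to the exact 4.295 s.
 */
#if 0
static unsigned long example_cm_timeout_ms(u8 encoded)
{
	/* Mirrors the req.timeout_ms computation in cma_resolve_ib_udp() */
	return 1UL << (encoded - 8);
}
#endif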
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id	*id;
	u8 offset;
	int ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
	/* Alternate path SGID attribute currently unsupported */
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;
	req.ece.vendor_id = id_priv->ece.vendor_id;
	req.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_req(id_priv);
	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	cm_id->tos_set = id_priv->tos_set;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
		ret = -EINVAL;
		goto err_unlock;
	}

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err_state;
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
err_state:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
err_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
/**
 * rdma_connect_ece - Initiate an active connection request with ECE data.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 * @ece: ECE parameters
 *
 * See rdma_connect() explanation.
 */
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		     struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return rdma_connect(id, conn_param);
}
EXPORT_SYMBOL(rdma_connect_ece);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;
	rep.ece.vendor_id = id_priv->ece.vendor_id;
	rep.ece.attr_mod = id_priv->ece.attr_mod;

	trace_cm_send_rep(id_priv);
	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	if (!conn_param)
		return -EINVAL;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else {
		iw_param.qpn = conn_param->qp_num;
	}

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;

		rep.ece.vendor_id = id_priv->ece.vendor_id;
		rep.ece.attr_mod = id_priv->ece.attr_mod;
	}

	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	trace_cm_send_sidr_rep(id_priv);
	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		  const char *caller)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int ret;

	lockdep_assert_held(&id_priv->handler_mutex);

	rdma_restrack_set_task(&id_priv->res, caller);

	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}
EXPORT_SYMBOL(__rdma_accept);
int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
		      const char *caller, struct rdma_ucm_ece *ece)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	id_priv->ece.vendor_id = ece->vendor_id;
	id_priv->ece.attr_mod = ece->attr_mod;

	return __rdma_accept(id, conn_param, caller);
}
EXPORT_SYMBOL(__rdma_accept_ece);
void rdma_lock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_lock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_lock_handler);

void rdma_unlock_handler(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);

	mutex_unlock(&id_priv->handler_mutex);
}
EXPORT_SYMBOL(rdma_unlock_handler);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len, u8 reason)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		} else {
			trace_cm_send_rej(id_priv);
			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
					     private_data, private_data_len);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else {
		ret = -ENOSYS;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		trace_cm_disconnect(id_priv);
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
				trace_cm_sent_drep(id_priv);
		} else {
			trace_cm_sent_dreq(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event = {};
	int ret;

	id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
		goto out;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	else
		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
				     status);
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp) {
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
		if (status)
			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
					     status);
	}
	mutex_unlock(&id_priv->qp_mutex);

	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		struct rdma_dev_addr *dev_addr =
			&id_priv->id.route.addr.dev_addr;
		struct net_device *ndev =
			dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		enum ib_gid_type gid_type =
			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
			rdma_start_port(id_priv->cma_dev->device)];

		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ret = ib_init_ah_from_mcmember(id_priv->id.device,
					       id_priv->id.port_num,
					       &multicast->rec,
					       ndev, gid_type,
					       &event.param.ud.ah_attr);
		if (ret)
			event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
		if (ndev)
			dev_put(ndev);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = cma_cm_event_handler(id_priv, &event);

	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	if (ret) {
		destroy_id_handler_unlock(id_priv);
		return 0;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
	    (!ib_sa_sendonly_fullmem_support(&sa_client,
					     id_priv->id.device,
					     id_priv->id.port_num))) {
		dev_warn(
			&id_priv->id.device->dev,
			"RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
			id_priv->id.port_num);
		return -EOPNOTSUPP;
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
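/*
 * RoCE has no subnet administrator, so an IBoE join cannot complete through
 * an SA callback the way an IB join does. Instead, cma_iboe_join_multicast()
 * fills in the member record locally and queues this work item, which fakes
 * a successful SA completion by invoking cma_ib_mc_handler() directly.
 */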
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
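/*
 * Build the MGID for a RoCE join. IPv6 addresses are used verbatim; IPv4
 * addresses are expanded into a synthetic mapped GID whose two leading
 * bytes depend on the GID type (zeroed for RoCE v2 / UDP encapsulation).
 */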
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
		mgid->raw[1] =
			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;
	enum ib_gid_type gid_type;
	bool send_only;

	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);

	if (addr->sa_family == AF_INET) {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
			if (!send_only) {
				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
						    true);
			}
		}
	} else {
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !mc->multicast.ib->rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
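/**
 * rdma_join_multicast - Join a multicast group with the given cm_id.
 * @id: Communication identifier in the ADDR_BOUND or ADDR_RESOLVED state.
 * @addr: Multicast address to join.
 * @join_state: Requested join state, e.g. BIT(SENDONLY_FULLMEMBER_JOIN).
 * @context: User context returned in the join event's ud.private_data.
 *
 * The join completes asynchronously; the result is reported to the cm_id's
 * event handler as RDMA_CM_EVENT_MULTICAST_JOIN or MULTICAST_ERROR. A
 * minimal usage sketch (mc_addr and my_ctx are illustrative names, not
 * part of this API):
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&mc_addr,
 *				  BIT(FULLMEMBER_JOIN), my_ctx);
 *	if (ret)
 *		return ret;
 *
 * and then wait for RDMA_CM_EVENT_MULTICAST_JOIN on the event handler.
 */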
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct cma_multicast *mc;
	int ret;

	/* ULP is calling this wrong. */
	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->join_state = join_state;

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else {
		ret = -ENOSYS;
		goto out_err;
	}

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	return 0;
out_err:
	kfree(mc);
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
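/**
 * rdma_leave_multicast - Leave the multicast group referenced by the given
 *   cm_id.
 * @id: Communication identifier used for the join.
 * @addr: Multicast address, as passed to rdma_join_multicast().
 *
 * If a QP is attached to the cm_id, it is detached from the group before
 * the membership is released.
 */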
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num)) {
				cma_leave_roce_mc_group(id_priv, mc);
			}
			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
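/*
 * A bonding failover can silently change the hardware address backing a
 * bound cm_id. If @ndev backs @id_priv and the recorded source address no
 * longer matches, queue a work item that delivers
 * RDMA_CM_EVENT_ADDR_CHANGE so the ULP can re-resolve.
 */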
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		cma_id_get(id_priv);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!netif_is_bond_master(ndev))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static int cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;
	int ret;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return -ENOMEM;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		ret = -ENOMEM;
		goto free_cma_dev;
	}

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos) {
		ret = -ENOMEM;
		goto free_gid_type;
	}

	rdma_for_each_port (device, i) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				CMA_PREFERRED_ROCE_GID_TYPE;
		else
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	refcount_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);

	trace_cm_add_one(device);
	return 0;

free_gid_type:
	kfree(cma_dev->default_gid_type);

free_cma_dev:
	kfree(cma_dev);
	return ret;
}
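/*
 * Deliver RDMA_CM_EVENT_DEVICE_REMOVAL to one id and drop the reference
 * taken by cma_process_remove(). Moving the id to RDMA_CM_DEVICE_REMOVAL
 * under the lock ensures only one thread (this one or a racing destroy)
 * performs the cancel, and a non-zero handler return destroys the id on
 * the ULP's behalf.
 */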
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
	enum rdma_cm_state state;
	unsigned long flags;

	mutex_lock(&id_priv->handler_mutex);
	/* Record that we want to remove the device */
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
		spin_unlock_irqrestore(&id_priv->lock, flags);
		mutex_unlock(&id_priv->handler_mutex);
		cma_id_put(id_priv);
		return;
	}
	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	if (cma_cm_event_handler(id_priv, &event)) {
		/*
		 * At this point the ULP promises it won't call
		 * rdma_destroy_id() concurrently
		 */
		cma_id_put(id_priv);
		mutex_unlock(&id_priv->handler_mutex);
		trace_cm_id_destroy(id_priv);
		_destroy_id(id_priv, state);
		return;
	}
	mutex_unlock(&id_priv->handler_mutex);

	/*
	 * If this races with destroy then the thread that first assigns state
	 * to a destroying does the cancel.
	 */
	cma_cancel_operation(id_priv, state);
	cma_id_put(id_priv);
}
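/*
 * Flush every id bound to a departing device. The list is drained one
 * entry at a time, dropping the global lock around each upcall so the
 * handler can block, then the function waits for all references to the
 * cma_dev to disappear before the caller frees it.
 */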
static void cma_process_remove(struct cma_device *cma_dev)
{
	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		struct rdma_id_private *id_priv = list_first_entry(
			&cma_dev->id_list, struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		cma_id_get(id_priv);
		mutex_unlock(&lock);

		cma_send_device_removal_put(id_priv);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_dev_put(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	trace_cm_remove_one(device);

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	xa_init(&pernet->tcp_ps);
	xa_init(&pernet->udp_ps);
	xa_init(&pernet->ipoib_ps);
	xa_init(&pernet->ib_ps);

	return 0;
}
static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	WARN_ON(!xa_empty(&pernet->tcp_ps));
	WARN_ON(!xa_empty(&pernet->udp_ps));
	WARN_ON(!xa_empty(&pernet->ipoib_ps));
	WARN_ON(!xa_empty(&pernet->ib_ps));
}
static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
static int __init cma_init(void)
{
	int ret;

	/*
	 * There is a rare lock ordering dependency in cma_netdev_callback()
	 * that only happens when bonding is enabled. Teach lockdep that rtnl
	 * must never be nested under lock so it can find these without having
	 * to test with bonding.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		rtnl_lock();
		mutex_lock(&lock);
		mutex_unlock(&lock);
		rtnl_unlock();
	}

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);