/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/cdev.h>
#include <linux/xarray.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>

#include "core_priv.h"

MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_ucm_device {
        int                     devnum;
        struct cdev             cdev;
        struct device           dev;
        struct ib_device        *ib_dev;
};

struct ib_ucm_file {
        struct mutex file_mutex;
        struct file *filp;
        struct ib_ucm_device *device;

        struct list_head  ctxs;
        struct list_head  events;
        wait_queue_head_t poll_wait;
};
struct ib_ucm_context {
        u32                 id;
        struct completion   comp;
        atomic_t            ref;
        int                 events_reported;

        struct ib_ucm_file *file;
        struct ib_cm_id    *cm_id;
        __u64               uid;

        struct list_head    events;    /* list of pending events. */
        struct list_head    file_list; /* member in file ctx list */
};

struct ib_ucm_event {
        struct ib_ucm_context    *ctx;
        struct list_head         file_list; /* member in file event list */
        struct list_head         ctx_list;  /* member in ctx event list */

        struct ib_cm_id          *cm_id;
        struct ib_ucm_event_resp resp;
        void                     *data;
        void                     *info;
        int                      data_len;
        int                      info_len;
};
enum {
        IB_UCM_MAJOR = 231,
        IB_UCM_BASE_MINOR = 224,
        IB_UCM_MAX_DEVICES = RDMA_MAX_PORTS,
        IB_UCM_NUM_FIXED_MINOR = 32,
        IB_UCM_NUM_DYNAMIC_MINOR = IB_UCM_MAX_DEVICES - IB_UCM_NUM_FIXED_MINOR,
};

#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static dev_t dynamic_ucm_dev;
static void ib_ucm_add_one(struct ib_device *device);
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client ucm_client = {
        .name   = "ucm",
        .add    = ib_ucm_add_one,
        .remove = ib_ucm_remove_one
};

static DEFINE_XARRAY_ALLOC(ctx_id_table);
static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
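
/*
 * Context lookup and reference counting: ib_ucm_ctx_get() looks a context up
 * in ctx_id_table under xa_lock and only takes a reference if it belongs to
 * the calling file.  ib_ucm_ctx_put() drops that reference and completes
 * ctx->comp when it reaches zero, which ib_ucm_destroy_id() waits on before
 * freeing the context.
 */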
static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
{
        struct ib_ucm_context *ctx;

        xa_lock(&ctx_id_table);
        ctx = xa_load(&ctx_id_table, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        else
                atomic_inc(&ctx->ref);
        xa_unlock(&ctx_id_table);

        return ctx;
}
static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}
static inline int ib_ucm_new_cm_id(int event)
{
        return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
}
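
/*
 * Drop all events still queued on a context.  Events that carry a brand new
 * cm_id (REQ/SIDR_REQ received) still own that id until userspace claims it,
 * so the unclaimed cm_id is destroyed here as well.
 */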
static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
{
        struct ib_ucm_event *uevent;

        mutex_lock(&ctx->file->file_mutex);
        list_del(&ctx->file_list);
        while (!list_empty(&ctx->events)) {

                uevent = list_entry(ctx->events.next,
                                    struct ib_ucm_event, ctx_list);
                list_del(&uevent->file_list);
                list_del(&uevent->ctx_list);
                mutex_unlock(&ctx->file->file_mutex);

                /* clear incoming connections. */
                if (ib_ucm_new_cm_id(uevent->resp.event))
                        ib_destroy_cm_id(uevent->cm_id);

                kfree(uevent);
                mutex_lock(&ctx->file->file_mutex);
        }
        mutex_unlock(&ctx->file->file_mutex);
}
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
        struct ib_ucm_context *ctx;

        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        ctx->file = file;
        INIT_LIST_HEAD(&ctx->events);

        if (xa_alloc(&ctx_id_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
                goto error;

        list_add_tail(&ctx->file_list, &file->ctxs);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}
static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
                                 const struct ib_cm_req_event_param *kreq)
{
        ureq->remote_ca_guid             = kreq->remote_ca_guid;
        ureq->remote_qkey                = kreq->remote_qkey;
        ureq->remote_qpn                 = kreq->remote_qpn;
        ureq->qp_type                    = kreq->qp_type;
        ureq->starting_psn               = kreq->starting_psn;
        ureq->responder_resources        = kreq->responder_resources;
        ureq->initiator_depth            = kreq->initiator_depth;
        ureq->local_cm_response_timeout  = kreq->local_cm_response_timeout;
        ureq->flow_control               = kreq->flow_control;
        ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
        ureq->retry_count                = kreq->retry_count;
        ureq->rnr_retry_count            = kreq->rnr_retry_count;
        ureq->srq                        = kreq->srq;
        ureq->port                       = kreq->port;

        ib_copy_path_rec_to_user(&ureq->primary_path, kreq->primary_path);
        if (kreq->alternate_path)
                ib_copy_path_rec_to_user(&ureq->alternate_path,
                                         kreq->alternate_path);
}
static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
                                 const struct ib_cm_rep_event_param *krep)
{
        urep->remote_ca_guid      = krep->remote_ca_guid;
        urep->remote_qkey         = krep->remote_qkey;
        urep->remote_qpn          = krep->remote_qpn;
        urep->starting_psn        = krep->starting_psn;
        urep->responder_resources = krep->responder_resources;
        urep->initiator_depth     = krep->initiator_depth;
        urep->target_ack_delay    = krep->target_ack_delay;
        urep->failover_accepted   = krep->failover_accepted;
        urep->flow_control        = krep->flow_control;
        urep->rnr_retry_count     = krep->rnr_retry_count;
        urep->srq                 = krep->srq;
}
static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
                                      const struct ib_cm_sidr_rep_event_param *krep)
{
        urep->status = krep->status;
        urep->qkey   = krep->qkey;
        urep->qpn    = krep->qpn;
}
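
/*
 * Translate a kernel ib_cm_event into the ib_ucm_event_resp layout reported
 * to userspace, duplicating any private data and (for REJ/APR/SIDR_REP)
 * extra info so it can be copied out later by ib_ucm_event().
 */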
static int ib_ucm_event_process(const struct ib_cm_event *evt,
                                struct ib_ucm_event *uvt)
{
        void *info = NULL;

        switch (evt->event) {
        case IB_CM_REQ_RECEIVED:
                ib_ucm_event_req_get(&uvt->resp.u.req_resp,
                                     &evt->param.req_rcvd);
                uvt->data_len      = IB_CM_REQ_PRIVATE_DATA_SIZE;
                uvt->resp.present  = IB_UCM_PRES_PRIMARY;
                uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
                                      IB_UCM_PRES_ALTERNATE : 0);
                break;
        case IB_CM_REP_RECEIVED:
                ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
                                     &evt->param.rep_rcvd);
                uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_RTU_RECEIVED:
                uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_DREQ_RECEIVED:
                uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_DREP_RECEIVED:
                uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_MRA_RECEIVED:
                uvt->resp.u.mra_resp.timeout =
                                        evt->param.mra_rcvd.service_timeout;
                uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_REJ_RECEIVED:
                uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
                uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.rej_rcvd.ari_length;
                info          = evt->param.rej_rcvd.ari;
                break;
        case IB_CM_LAP_RECEIVED:
                ib_copy_path_rec_to_user(&uvt->resp.u.lap_resp.path,
                                         evt->param.lap_rcvd.alternate_path);
                uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
                uvt->resp.present = IB_UCM_PRES_ALTERNATE;
                break;
        case IB_CM_APR_RECEIVED:
                uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
                uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.apr_rcvd.info_len;
                info          = evt->param.apr_rcvd.apr_info;
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                uvt->resp.u.sidr_req_resp.pkey =
                                        evt->param.sidr_req_rcvd.pkey;
                uvt->resp.u.sidr_req_resp.port =
                                        evt->param.sidr_req_rcvd.port;
                uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_SIDR_REP_RECEIVED:
                ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
                                          &evt->param.sidr_rep_rcvd);
                uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
                info          = evt->param.sidr_rep_rcvd.info;
                break;
        default:
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        }

        if (uvt->data_len) {
                uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
                if (!uvt->data)
                        goto err1;

                uvt->resp.present |= IB_UCM_PRES_DATA;
        }

        if (uvt->info_len) {
                uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
                if (!uvt->info)
                        goto err2;

                uvt->resp.present |= IB_UCM_PRES_INFO;
        }
        return 0;

err2:
        kfree(uvt->data);
err1:
        return -ENOMEM;
}
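
/*
 * ib_cm callback: queue the event on the owning file and wake up pollers.
 * On allocation or processing failure, a nonzero return for new connection
 * ids tells the ib_cm core to destroy the freshly created cm_id.
 */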
static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
                                const struct ib_cm_event *event)
{
        struct ib_ucm_event *uevent;
        struct ib_ucm_context *ctx;
        int result = 0;

        ctx = cm_id->context;

        uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
        if (!uevent)
                goto err1;

        uevent->ctx = ctx;
        uevent->cm_id = cm_id;
        uevent->resp.uid = ctx->uid;
        uevent->resp.id = ctx->id;
        uevent->resp.event = event->event;

        result = ib_ucm_event_process(event, uevent);
        if (result)
                goto err2;

        mutex_lock(&ctx->file->file_mutex);
        list_add_tail(&uevent->file_list, &ctx->file->events);
        list_add_tail(&uevent->ctx_list, &ctx->events);
        wake_up_interruptible(&ctx->file->poll_wait);
        mutex_unlock(&ctx->file->file_mutex);
        return 0;

err2:
        kfree(uevent);
err1:
        /* Destroy new cm_id's */
        return ib_ucm_new_cm_id(event->event);
}
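
/*
 * IB_USER_CM_CMD_EVENT: block (unless O_NONBLOCK) until an event is queued,
 * bind a new context to incoming REQ/SIDR_REQ ids, and copy the event,
 * private data and info out to the buffers given in struct ib_ucm_event_get.
 */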
static ssize_t ib_ucm_event(struct ib_ucm_file *file,
                            const char __user *inbuf,
                            int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_event_get cmd;
        struct ib_ucm_event *uevent;
        int result = 0;

        if (out_len < sizeof(struct ib_ucm_event_resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->file_mutex);
        while (list_empty(&file->events)) {
                mutex_unlock(&file->file_mutex);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->events)))
                        return -ERESTARTSYS;

                mutex_lock(&file->file_mutex);
        }

        uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);

        if (ib_ucm_new_cm_id(uevent->resp.event)) {
                ctx = ib_ucm_ctx_alloc(file);
                if (!ctx) {
                        result = -ENOMEM;
                        goto done;
                }

                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &uevent->resp, sizeof(uevent->resp))) {
                result = -EFAULT;
                goto done;
        }

        if (uevent->data) {
                if (cmd.data_len < uevent->data_len) {
                        result = -ENOMEM;
                        goto done;
                }
                if (copy_to_user(u64_to_user_ptr(cmd.data),
                                 uevent->data, uevent->data_len)) {
                        result = -EFAULT;
                        goto done;
                }
        }

        if (uevent->info) {
                if (cmd.info_len < uevent->info_len) {
                        result = -ENOMEM;
                        goto done;
                }
                if (copy_to_user(u64_to_user_ptr(cmd.info),
                                 uevent->info, uevent->info_len)) {
                        result = -EFAULT;
                        goto done;
                }
        }

        list_del(&uevent->file_list);
        list_del(&uevent->ctx_list);
        uevent->ctx->events_reported++;

        kfree(uevent->data);
        kfree(uevent->info);
        kfree(uevent);
done:
        mutex_unlock(&file->file_mutex);
        return result;
}
static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct ib_ucm_create_id cmd;
        struct ib_ucm_create_id_resp resp;
        struct ib_ucm_context *ctx;
        int result;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->file_mutex);
        ctx = ib_ucm_ctx_alloc(file);
        mutex_unlock(&file->file_mutex);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
                                     ib_ucm_event_handler, ctx);
        if (IS_ERR(ctx->cm_id)) {
                result = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp))) {
                result = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        ib_destroy_cm_id(ctx->cm_id);
err1:
        xa_erase(&ctx_id_table, ctx->id);
        mutex_lock(&file->file_mutex);
        list_del(&ctx->file_list);
        mutex_unlock(&file->file_mutex);
        kfree(ctx);
        return result;
}
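
/*
 * IB_USER_CM_CMD_DESTROY_ID teardown order: remove the context from
 * ctx_id_table so no new lookups succeed, wait for outstanding references,
 * destroy the cm_id (which stops event delivery), then reap any events that
 * were never reported to userspace.
 */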
static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct ib_ucm_destroy_id cmd;
        struct ib_ucm_destroy_id_resp resp;
        struct ib_ucm_context *ctx;
        int result = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        xa_lock(&ctx_id_table);
        ctx = xa_load(&ctx_id_table, cmd.id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        else
                __xa_erase(&ctx_id_table, ctx->id);
        xa_unlock(&ctx_id_table);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ib_ucm_ctx_put(ctx);
        wait_for_completion(&ctx->comp);

        /* No new events will be generated after destroying the cm_id. */
        ib_destroy_cm_id(ctx->cm_id);

        /* Cleanup events not yet reported to the user. */
        ib_ucm_cleanup_events(ctx);

        resp.events_reported = ctx->events_reported;
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                result = -EFAULT;

        kfree(ctx);
        return result;
}
static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ib_ucm_attr_id_resp resp;
        struct ib_ucm_attr_id cmd;
        struct ib_ucm_context *ctx;
        int result = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.service_id   = ctx->cm_id->service_id;
        resp.service_mask = ctx->cm_id->service_mask;
        resp.local_id     = ctx->cm_id->local_id;
        resp.remote_id    = ctx->cm_id->remote_id;

        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                result = -EFAULT;

        ib_ucm_ctx_put(ctx);
        return result;
}
static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct ib_uverbs_qp_attr resp;
        struct ib_ucm_init_qp_attr cmd;
        struct ib_ucm_context *ctx;
        struct ib_qp_attr qp_attr;
        int result = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (result)
                goto out;

        ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);

        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                result = -EFAULT;

out:
        ib_ucm_ctx_put(ctx);
        return result;
}
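
/*
 * Service IDs reserved for the kernel RDMA CM and SDP are not allowed to be
 * claimed through the userspace CM interface.
 */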
static int ucm_validate_listen(__be64 service_id, __be64 service_mask)
{
        service_id &= service_mask;

        if (((service_id & IB_CMA_SERVICE_ID_MASK) == IB_CMA_SERVICE_ID) ||
            ((service_id & IB_SDP_SERVICE_ID_MASK) == IB_SDP_SERVICE_ID))
                return -EINVAL;

        return 0;
}
static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
                             const char __user *inbuf,
                             int in_len, int out_len)
{
        struct ib_ucm_listen cmd;
        struct ib_ucm_context *ctx;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        result = ucm_validate_listen(cmd.service_id, cmd.service_mask);
        if (result)
                goto out;

        result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
out:
        ib_ucm_ctx_put(ctx);
        return result;
}
static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
                             const char __user *inbuf,
                             int in_len, int out_len)
{
        struct ib_ucm_notify cmd;
        struct ib_ucm_context *ctx;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ib_ucm_ctx_put(ctx);
        return result;
}
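
/*
 * Helpers for pulling variable sized command payloads (private data and path
 * records) in from userspace; a zero length or source pointer leaves the
 * destination NULL.
 */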
static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
{
        void *data;

        *dest = NULL;

        if (!len)
                return 0;

        data = memdup_user(u64_to_user_ptr(src), len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        *dest = data;
        return 0;
}
static int ib_ucm_path_get(struct sa_path_rec **path, u64 src)
{
        struct ib_user_path_rec upath;
        struct sa_path_rec *sa_path;

        *path = NULL;

        if (!src)
                return 0;

        sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
        if (!sa_path)
                return -ENOMEM;

        if (copy_from_user(&upath, u64_to_user_ptr(src),
                           sizeof(upath))) {

                kfree(sa_path);
                return -EFAULT;
        }

        ib_copy_path_rec_from_user(sa_path, &upath);
        *path = sa_path;
        return 0;
}
static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_cm_req_param param;
        struct ib_ucm_context *ctx;
        struct ib_ucm_req cmd;
        int result;

        param.private_data   = NULL;
        param.primary_path   = NULL;
        param.alternate_path = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
        if (result)
                goto done;

        param.private_data_len           = cmd.len;
        param.service_id                 = cmd.sid;
        param.qp_num                     = cmd.qpn;
        param.qp_type                    = cmd.qp_type;
        param.starting_psn               = cmd.psn;
        param.peer_to_peer               = cmd.peer_to_peer;
        param.responder_resources        = cmd.responder_resources;
        param.initiator_depth            = cmd.initiator_depth;
        param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
        param.flow_control               = cmd.flow_control;
        param.local_cm_response_timeout  = cmd.local_cm_response_timeout;
        param.retry_count                = cmd.retry_count;
        param.rnr_retry_count            = cmd.rnr_retry_count;
        param.max_cm_retries             = cmd.max_cm_retries;
        param.srq                        = cmd.srq;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = ib_send_cm_req(ctx->cm_id, &param);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

done:
        kfree(param.private_data);
        kfree(param.primary_path);
        kfree(param.alternate_path);
        return result;
}
static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_cm_rep_param param;
        struct ib_ucm_context *ctx;
        struct ib_ucm_rep cmd;
        int result;

        param.private_data = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                return result;

        param.qp_num              = cmd.qpn;
        param.starting_psn        = cmd.psn;
        param.private_data_len    = cmd.len;
        param.responder_resources = cmd.responder_resources;
        param.initiator_depth     = cmd.initiator_depth;
        param.failover_accepted   = cmd.failover_accepted;
        param.flow_control        = cmd.flow_control;
        param.rnr_retry_count     = cmd.rnr_retry_count;
        param.srq                 = cmd.srq;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                ctx->uid = cmd.uid;
                result = ib_send_cm_rep(ctx->cm_id, &param);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

        kfree(param.private_data);
        return result;
}
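
/*
 * Shared implementation for the RTU, DREQ and DREP commands, which differ
 * only in the ib_cm send function invoked with the user's private data.
 */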
static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
                                        const char __user *inbuf, int in_len,
                                        int (*func)(struct ib_cm_id *cm_id,
                                                    const void *private_data,
                                                    u8 private_data_len))
{
        struct ib_ucm_private_data cmd;
        struct ib_ucm_context *ctx;
        const void *private_data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
        if (result)
                return result;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = func(ctx->cm_id, private_data, cmd.len);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

        kfree(private_data);
        return result;
}
static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
}

static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
}

static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
}
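
/*
 * Shared implementation for the REJ and APR commands, which send both a
 * status/info blob and optional private data through the supplied ib_cm
 * function.
 */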
static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
                                const char __user *inbuf, int in_len,
                                int (*func)(struct ib_cm_id *cm_id,
                                            int status,
                                            const void *info,
                                            u8 info_len,
                                            const void *data,
                                            u8 data_len))
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_info cmd;
        const void *data = NULL;
        const void *info = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
        if (result)
                goto done;

        result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
        if (result)
                goto done;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
                              data, cmd.data_len);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

done:
        kfree(data);
        kfree(info);
        return result;
}
static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
}

static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
}
static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_mra cmd;
        const void *data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
        if (result)
                return result;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

        kfree(data);
        return result;
}
static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct sa_path_rec *path = NULL;
        struct ib_ucm_lap cmd;
        const void *data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&path, cmd.path);
        if (result)
                goto done;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

done:
        kfree(data);
        kfree(path);
        return result;
}
static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct ib_cm_sidr_req_param param = {};
        struct ib_ucm_context *ctx;
        struct ib_ucm_sidr_req cmd;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.path, cmd.path);
        if (result)
                goto done;

        param.private_data_len = cmd.len;
        param.service_id       = cmd.sid;
        param.timeout_ms       = cmd.timeout;
        param.max_cm_retries   = cmd.max_cm_retries;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = ib_send_cm_sidr_req(ctx->cm_id, &param);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

done:
        kfree(param.private_data);
        kfree(param.path);
        return result;
}
static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct ib_cm_sidr_rep_param param;
        struct ib_ucm_sidr_rep cmd;
        struct ib_ucm_context *ctx;
        int result;

        param.info = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data,
                                   cmd.data, cmd.data_len);
        if (result)
                goto done;

        result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
        if (result)
                goto done;

        param.qp_num           = cmd.qpn;
        param.qkey             = cmd.qkey;
        param.status           = cmd.status;
        param.info_length      = cmd.info_len;
        param.private_data_len = cmd.data_len;

        ctx = ib_ucm_ctx_get(file, cmd.id);
        if (!IS_ERR(ctx)) {
                result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
                ib_ucm_ctx_put(ctx);
        } else
                result = PTR_ERR(ctx);

done:
        kfree(param.private_data);
        kfree(param.info);
        return result;
}
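
/*
 * Commands arrive through write() as a struct ib_ucm_cmd_hdr followed by the
 * command specific structure; hdr.cmd indexes ucm_cmd_table below.  A rough
 * sketch of a userspace caller (illustrative only, not taken from this file):
 *
 *	struct ib_ucm_cmd_hdr hdr = {
 *		.cmd = IB_USER_CM_CMD_CREATE_ID,
 *		.in  = sizeof(struct ib_ucm_create_id),
 *		.out = sizeof(struct ib_ucm_create_id_resp),
 *	};
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &create_cmd, hdr.in);
 *	write(fd, buf, sizeof(hdr) + hdr.in);
 */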
static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len) = {
        [IB_USER_CM_CMD_CREATE_ID]     = ib_ucm_create_id,
        [IB_USER_CM_CMD_DESTROY_ID]    = ib_ucm_destroy_id,
        [IB_USER_CM_CMD_ATTR_ID]       = ib_ucm_attr_id,
        [IB_USER_CM_CMD_LISTEN]        = ib_ucm_listen,
        [IB_USER_CM_CMD_NOTIFY]        = ib_ucm_notify,
        [IB_USER_CM_CMD_SEND_REQ]      = ib_ucm_send_req,
        [IB_USER_CM_CMD_SEND_REP]      = ib_ucm_send_rep,
        [IB_USER_CM_CMD_SEND_RTU]      = ib_ucm_send_rtu,
        [IB_USER_CM_CMD_SEND_DREQ]     = ib_ucm_send_dreq,
        [IB_USER_CM_CMD_SEND_DREP]     = ib_ucm_send_drep,
        [IB_USER_CM_CMD_SEND_REJ]      = ib_ucm_send_rej,
        [IB_USER_CM_CMD_SEND_MRA]      = ib_ucm_send_mra,
        [IB_USER_CM_CMD_SEND_LAP]      = ib_ucm_send_lap,
        [IB_USER_CM_CMD_SEND_APR]      = ib_ucm_send_apr,
        [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
        [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
        [IB_USER_CM_CMD_EVENT]         = ib_ucm_event,
        [IB_USER_CM_CMD_INIT_QP_ATTR]  = ib_ucm_init_qp_attr,
};
static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
                            size_t len, loff_t *pos)
{
        struct ib_ucm_file *file = filp->private_data;
        struct ib_ucm_cmd_hdr hdr;
        ssize_t result;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
                return -EINVAL;
        hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
                                        hdr.in, hdr.out);
        if (!result)
                result = len;

        return result;
}
static __poll_t ib_ucm_poll(struct file *filp,
                            struct poll_table_struct *wait)
{
        struct ib_ucm_file *file = filp->private_data;
        __poll_t mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->events))
                mask = EPOLLIN | EPOLLRDNORM;

        return mask;
}
/*
 * ib_ucm_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ib_ucm_open(struct inode *inode, struct file *filp)
{
        struct ib_ucm_file *file;

        file = kmalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->events);
        INIT_LIST_HEAD(&file->ctxs);
        init_waitqueue_head(&file->poll_wait);

        mutex_init(&file->file_mutex);

        filp->private_data = file;
        file->filp = filp;
        file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);

        return stream_open(inode, filp);
}
static int ib_ucm_close(struct inode *inode, struct file *filp)
{
        struct ib_ucm_file *file = filp->private_data;
        struct ib_ucm_context *ctx;

        mutex_lock(&file->file_mutex);
        while (!list_empty(&file->ctxs)) {
                ctx = list_entry(file->ctxs.next,
                                 struct ib_ucm_context, file_list);
                mutex_unlock(&file->file_mutex);

                xa_erase(&ctx_id_table, ctx->id);
                ib_destroy_cm_id(ctx->cm_id);
                ib_ucm_cleanup_events(ctx);
                kfree(ctx);

                mutex_lock(&file->file_mutex);
        }
        mutex_unlock(&file->file_mutex);
        kfree(file);
        return 0;
}
static void ib_ucm_release_dev(struct device *dev)
{
        struct ib_ucm_device *ucm_dev;

        ucm_dev = container_of(dev, struct ib_ucm_device, dev);
        kfree(ucm_dev);
}

static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
{
        clear_bit(ucm_dev->devnum, dev_map);
}
static const struct file_operations ucm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_ucm_open,
        .release = ib_ucm_close,
        .write   = ib_ucm_write,
        .poll    = ib_ucm_poll,
        .llseek  = no_llseek,
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct ib_ucm_device *ucm_dev;

        ucm_dev = container_of(dev, struct ib_ucm_device, dev);
        return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
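
/*
 * One character device ("ucm<N>") is created per IB device that supports the
 * CM on port 1 and provides a verbs user context.  The first
 * IB_UCM_NUM_FIXED_MINOR devices use the fixed IB_UCM_BASE_DEV range; the
 * remainder come from the dynamically allocated dynamic_ucm_dev region
 * reserved in ib_ucm_init().
 */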
static void ib_ucm_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_ucm_device *ucm_dev;

        if (!device->ops.alloc_ucontext || !rdma_cap_ib_cm(device, 1))
                return;

        ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
        if (!ucm_dev)
                return;

        device_initialize(&ucm_dev->dev);
        ucm_dev->ib_dev = device;
        ucm_dev->dev.release = ib_ucm_release_dev;

        devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
        if (devnum >= IB_UCM_MAX_DEVICES)
                goto err;
        ucm_dev->devnum = devnum;
        set_bit(devnum, dev_map);
        if (devnum >= IB_UCM_NUM_FIXED_MINOR)
                base = dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
        else
                base = IB_UCM_BASE_DEV + devnum;

        cdev_init(&ucm_dev->cdev, &ucm_fops);
        ucm_dev->cdev.owner = THIS_MODULE;
        kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);

        ucm_dev->dev.class = &cm_class;
        ucm_dev->dev.parent = device->dev.parent;
        ucm_dev->dev.devt = base;

        dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
        if (cdev_device_add(&ucm_dev->cdev, &ucm_dev->dev))
                goto err_devnum;

        if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
                goto err_dev;

        ib_set_client_data(device, &ucm_client, ucm_dev);
        return;

err_dev:
        cdev_device_del(&ucm_dev->cdev, &ucm_dev->dev);
err_devnum:
        ib_ucm_free_dev(ucm_dev);
err:
        put_device(&ucm_dev->dev);
        return;
}
static void ib_ucm_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_ucm_device *ucm_dev = client_data;

        if (!ucm_dev)
                return;

        cdev_device_del(&ucm_dev->cdev, &ucm_dev->dev);
        ib_ucm_free_dev(ucm_dev);
        put_device(&ucm_dev->dev);
}
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_CM_ABI_VERSION));
static int __init ib_ucm_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR,
                                     "infiniband_cm");
        if (ret) {
                pr_err("ucm: couldn't register device number\n");
                goto error1;
        }

        ret = alloc_chrdev_region(&dynamic_ucm_dev, 0, IB_UCM_NUM_DYNAMIC_MINOR,
                                  "infiniband_cm");
        if (ret) {
                pr_err("ucm: couldn't register dynamic device number\n");
                goto err_alloc;
        }

        ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("ucm: couldn't create abi_version attribute\n");
                goto error2;
        }

        ret = ib_register_client(&ucm_client);
        if (ret) {
                pr_err("ucm: couldn't register client\n");
                goto error3;
        }
        return 0;

error3:
        class_remove_file(&cm_class, &class_attr_abi_version.attr);
error2:
        unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
err_alloc:
        unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
error1:
        return ret;
}
static void __exit ib_ucm_cleanup(void)
{
        ib_unregister_client(&ucm_client);
        class_remove_file(&cm_class, &class_attr_abi_version.attr);
        unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
        WARN_ON(!xa_empty(&ctx_id_table));
}

module_init(ib_ucm_init);
module_exit(ib_ucm_cleanup);