/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname = "max_backlog",
		.data = &max_backlog,
		.maxlen = sizeof max_backlog,
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex mut;
	struct file *filp;
	struct list_head ctx_list;
	struct list_head event_list;
	wait_queue_head_t poll_wait;
	struct workqueue_struct *close_wq;
};

struct ucma_context {
	u32 id;
	struct completion comp;
	refcount_t ref;
	int events_reported;
	int backlog;

	struct ucma_file *file;
	struct rdma_cm_id *cm_id;
	struct mutex mutex;
	u64 uid;

	struct list_head list;
	struct list_head mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the ctx_table lock
	 */
	int closing;
	/* sync between removal event and id destroy, protected by file mut */
	int destroying;
	struct work_struct close_work;
};

struct ucma_multicast {
	struct ucma_context *ctx;
	u32 id;
	int events_reported;

	u64 uid;
	u8 join_state;
	struct list_head list;
	struct sockaddr_storage addr;
};

struct ucma_event {
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct list_head list;
	struct rdma_cm_id *cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct close_work;
};

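/*
 * The u32 ids allocated from these xarrays are the handles handed to
 * userspace; they map back to the kernel-side context and multicast
 * objects.
 */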
static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);

static const struct file_operations ucma_fops;

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = xa_load(&ctx_table, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

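/*
 * ctx->ref counts active users of the context. When the last reference is
 * dropped, ucma_put_ctx() completes ctx->comp, which the destroy paths wait
 * on before calling rdma_destroy_id().
 */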
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			refcount_inc(&ctx->ref);
	}
	xa_unlock(&ctx_table);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, i.e. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until its explicit destruction
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	refcount_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;
	mutex_init(&ctx->mutex);

	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mc->ctx = ctx;
	if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
		goto error;

	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context points to a cm_id it owns can that cm_id be
	 * queued to be closed; otherwise the cm_id is an inflight one that
	 * sits on the context's event list, pending to be detached and
	 * reattached to its new context as part of ucma_get_event, and is
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		xa_lock(&ctx_table);
		ctx->closing = 1;
		xa_unlock(&ctx_table);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

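/*
 * rdma_cm event callback: translate the kernel event into a
 * rdma_ucm_event_resp, queue it on the owning file's event_list and wake any
 * pollers. A non-zero return tells the RDMA CM core to destroy the cm_id,
 * which is used when a connect request cannot be queued.
 */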
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	uevent->resp.ece.vendor_id = event->ece.vendor_id;
	uevent->resp.ece.attr_mod = event->ece.attr_mod;

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context. This can only happen if an error occurs on a
		 * new connection before the user accepts it. This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
			sizeof(uevent->resp.ece))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	xa_erase(&ctx_table, ctx->id);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		xa_erase(&multicast_table, mc->id);
		kfree(mc);
	}
	mutex_unlock(&ctx->file->mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to cleanup the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	mutex_destroy(&ctx->mutex);
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		__xa_erase(&ctx_table, ctx->id);
	xa_unlock(&ctx_table);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight closing task */
	xa_lock(&ctx_table);
	if (!ctx->closing) {
		xa_unlock(&ctx_table);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		xa_unlock(&ctx_table);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.ibdev_index = ctx->cm_id->device->index;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	mutex_unlock(&ctx->mutex);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
			 min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->ibdev_index = cm_id->device->index;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);

		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

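/*
 * Convert the user-supplied connection parameters into the kernel's
 * rdma_conn_param: the QP number is masked to 24 bits, and the qkey is only
 * honoured for AF_IB bindings.
 */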
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num & 0xFFFFFF;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_conn_param conn_param;
	struct rdma_ucm_ece ece = {};
	struct rdma_ucm_connect cmd;
	struct ucma_context *ctx;
	size_t in_size;
	int ret;

	in_size = min_t(size_t, in_len, sizeof(cmd));
	if (copy_from_user(&cmd, inbuf, in_size))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	if (offsetofend(typeof(cmd), ece) <= in_size) {
		ece.vendor_id = cmd.ece.vendor_id;
		ece.attr_mod = cmd.ece.attr_mod;
	}

	mutex_lock(&ctx->mutex);
	ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	mutex_lock(&ctx->mutex);
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct rdma_ucm_ece ece = {};
	struct ucma_context *ctx;
	size_t in_size;
	int ret;

	in_size = min_t(size_t, in_len, sizeof(cmd));
	if (copy_from_user(&cmd, inbuf, in_size))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (offsetofend(typeof(cmd), ece) <= in_size) {
		ece.vendor_id = cmd.ece.vendor_id;
		ece.attr_mod = cmd.ece.attr_mod;
	}

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		mutex_lock(&ctx->mutex);
		ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
		mutex_unlock(&ctx->mutex);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else {
		mutex_lock(&ctx->mutex);
		ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
		mutex_unlock(&ctx->mutex);
	}
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_disconnect(ctx->cm_id);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	mutex_lock(&ctx->mutex);
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
		mutex_unlock(&ctx->mutex);
	} else {
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
		mutex_unlock(&ctx->mutex);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		mutex_lock(&ctx->mutex);
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		mutex_unlock(&ctx->mutex);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	mutex_lock(&ctx->mutex);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	xa_store(&multicast_table, mc->id, mc, 0);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	xa_erase(&multicast_table, mc->id);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&multicast_table);
	mc = xa_load(&multicast_table, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!refcount_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		__xa_erase(&multicast_table, mc->id);
	xa_unlock(&multicast_table);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	mutex_lock(&mc->ctx->mutex);
	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_unlock(&mc->ctx->mutex);

	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

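/*
 * Move a cm_id (and any events queued for it) from the open file it currently
 * belongs to into the file this request was written to, e.g. when a process
 * re-opens the device and migrates its ids across fork.
 */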
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	xa_lock(&ctx_table);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	xa_unlock(&ctx_table);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

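/*
 * Each write() on the rdma_cm device carries a struct rdma_ucm_cmd_hdr
 * followed by the command-specific payload; hdr.cmd indexes this table.
 */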
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT] = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION] = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY] = ucma_query,
	[RDMA_USER_CM_CMD_BIND] = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return stream_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		xa_erase(&ctx_table, ctx->id);
		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue flushed, we are safe from any inflight handlers
		 * that might queue another closing task.
		 */
		xa_lock(&ctx_table);
		if (!ctx->closing) {
			xa_unlock(&ctx_table);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			xa_unlock(&ctx_table);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner = THIS_MODULE,
	.open = ucma_open,
	.release = ucma_close,
	.write = ucma_write,
	.poll = ucma_poll,
	.llseek = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rdma_cm",
	.nodename = "infiniband/rdma_cm",
	.mode = 0666,
	.fops = &ucma_fops,
};

static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
{
	res->abi = RDMA_USER_CM_ABI_VERSION;
	res->cdev = ucma_misc.this_device;
	return 0;
}

static struct ib_client rdma_cma_client = {
	.name = "rdma_cm",
	.get_global_nl_info = ucma_get_global_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("rdma_cm");

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = ib_register_client(&rdma_cma_client);
	if (ret)
		goto err3;

	return 0;
err3:
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	ib_unregister_client(&rdma_cma_client);
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
}

module_init(ucma_init);
module_exit(ucma_cleanup);