/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
57 MODULE_AUTHOR("Roland Dreier");
58 MODULE_DESCRIPTION("InfiniBand userspace verbs access");
59 MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = 32
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
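/*
 * Device-number layout: 231 is the character-device major assigned to
 * InfiniBand userspace verbs, and the first IB_UVERBS_MAX_DEVICES minors
 * start at 192, so IB_UVERBS_BASE_DEV is MKDEV(231, 192) and
 * uverbs0..uverbs31 occupy minors 192..223.  Devices beyond that fall
 * back to a dynamically allocated major (see find_overflow_devnum()
 * below).
 */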
static struct class *uverbs_class;

static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     struct ib_device *ib_dev,
				     const char __user *buf, int in_len,
				     int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]		= ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE]	= ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_QUERY_PORT]		= ib_uverbs_query_port,
	[IB_USER_VERBS_CMD_ALLOC_PD]		= ib_uverbs_alloc_pd,
	[IB_USER_VERBS_CMD_DEALLOC_PD]		= ib_uverbs_dealloc_pd,
	[IB_USER_VERBS_CMD_REG_MR]		= ib_uverbs_reg_mr,
	[IB_USER_VERBS_CMD_REREG_MR]		= ib_uverbs_rereg_mr,
	[IB_USER_VERBS_CMD_DEREG_MR]		= ib_uverbs_dereg_mr,
	[IB_USER_VERBS_CMD_ALLOC_MW]		= ib_uverbs_alloc_mw,
	[IB_USER_VERBS_CMD_DEALLOC_MW]		= ib_uverbs_dealloc_mw,
	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL]	= ib_uverbs_create_comp_channel,
	[IB_USER_VERBS_CMD_CREATE_CQ]		= ib_uverbs_create_cq,
	[IB_USER_VERBS_CMD_RESIZE_CQ]		= ib_uverbs_resize_cq,
	[IB_USER_VERBS_CMD_POLL_CQ]		= ib_uverbs_poll_cq,
	[IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]	= ib_uverbs_req_notify_cq,
	[IB_USER_VERBS_CMD_DESTROY_CQ]		= ib_uverbs_destroy_cq,
	[IB_USER_VERBS_CMD_CREATE_QP]		= ib_uverbs_create_qp,
	[IB_USER_VERBS_CMD_QUERY_QP]		= ib_uverbs_query_qp,
	[IB_USER_VERBS_CMD_MODIFY_QP]		= ib_uverbs_modify_qp,
	[IB_USER_VERBS_CMD_DESTROY_QP]		= ib_uverbs_destroy_qp,
	[IB_USER_VERBS_CMD_POST_SEND]		= ib_uverbs_post_send,
	[IB_USER_VERBS_CMD_POST_RECV]		= ib_uverbs_post_recv,
	[IB_USER_VERBS_CMD_POST_SRQ_RECV]	= ib_uverbs_post_srq_recv,
	[IB_USER_VERBS_CMD_CREATE_AH]		= ib_uverbs_create_ah,
	[IB_USER_VERBS_CMD_DESTROY_AH]		= ib_uverbs_destroy_ah,
	[IB_USER_VERBS_CMD_ATTACH_MCAST]	= ib_uverbs_attach_mcast,
	[IB_USER_VERBS_CMD_DETACH_MCAST]	= ib_uverbs_detach_mcast,
	[IB_USER_VERBS_CMD_CREATE_SRQ]		= ib_uverbs_create_srq,
	[IB_USER_VERBS_CMD_MODIFY_SRQ]		= ib_uverbs_modify_srq,
	[IB_USER_VERBS_CMD_QUERY_SRQ]		= ib_uverbs_query_srq,
	[IB_USER_VERBS_CMD_DESTROY_SRQ]		= ib_uverbs_destroy_srq,
	[IB_USER_VERBS_CMD_OPEN_XRCD]		= ib_uverbs_open_xrcd,
	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
};
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
				    struct ib_device *ib_dev,
				    struct ib_udata *ucore,
				    struct ib_udata *uhw) = {
	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
	[IB_USER_VERBS_EX_CMD_CREATE_CQ]	= ib_uverbs_ex_create_cq,
	[IB_USER_VERBS_EX_CMD_CREATE_QP]	= ib_uverbs_ex_create_qp,
	[IB_USER_VERBS_EX_CMD_CREATE_WQ]	= ib_uverbs_ex_create_wq,
	[IB_USER_VERBS_EX_CMD_MODIFY_WQ]	= ib_uverbs_ex_modify_wq,
	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]	= ib_uverbs_ex_destroy_wq,
	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_MODIFY_QP]	= ib_uverbs_ex_modify_qp,
};
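/*
 * Both tables above dispatch purely by opcode index; a NULL slot means
 * the command is not implemented.  The legacy table hands each handler
 * raw user buffer pointers and lengths, while the extended table wraps
 * the core and provider-specific regions in ib_udata so they can be
 * consumed independently.
 */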
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}
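/*
 * Note that pd->usecnt is dropped only when the hardware dealloc
 * succeeded: on failure the memory window still exists and still
 * references the PD, so decrementing the use count early could let the
 * PD be destroyed under a live MW.
 */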
static void ib_uverbs_release_dev(struct kobject *kobj)
{
	struct ib_uverbs_device *dev =
		container_of(kobj, struct ib_uverbs_device, kobj);

	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static struct kobj_type ib_uverbs_dev_ktype = {
	.release = ib_uverbs_release_dev,
};
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj_file.uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
				      struct ib_ucontext *context,
				      bool device_removed)
{
	context->closing = 1;
	uverbs_cleanup_ucontext(context, device_removed);
	put_pid(context->tgid);

	ib_rdmacg_uncharge(&context->cg_obj, context->device,
			   RDMACG_RESOURCE_HCA_HANDLE);

	return context->device->dealloc_ucontext(context);
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	kfree(file);
}
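/*
 * Note the module-reference pairing resolved above: ib_uverbs_open()
 * takes a reference on the low-level driver module only when the device
 * cannot disassociate ucontexts, so the reference is dropped here under
 * the same condition.  The SRCU read side protects the ib_dev pointer,
 * which ib_uverbs_remove_one() may already have cleared.
 */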
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					      !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If the device was disassociated and no event remains, return an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}
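/*
 * Blocking semantics implemented above: read() returns one queued event
 * per call, -EAGAIN immediately for O_NONBLOCK files, -ERESTARTSYS when
 * interrupted by a signal, and -EIO once the device has been
 * disassociated with nothing left to drain.  A minimal userspace
 * consumer of a completion channel might look like the sketch below
 * (illustrative only; handle_completion() is a hypothetical callback,
 * not part of this file):
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	while (read(channel_fd, &desc, sizeof(desc)) == sizeof(desc))
 *		handle_completion(desc.cq_handle);
 */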
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}
static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj_file.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}
static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
					 struct file *filp,
					 struct poll_table_struct *wait)
{
	unsigned int pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
static unsigned int ib_uverbs_async_event_poll(struct file *filp,
					       struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
					      struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
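/*
 * Together with the read() paths above, the poll and fasync hooks give
 * userspace three notification models for the same event queues:
 * blocking read(), poll()/select() (POLLIN | POLLRDNORM once an event
 * is queued), and asynchronous SIGIO delivery through the fasync queue
 * armed here.
 */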
static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_completion_event_file *file = filp->private_data;
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}
const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
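/*
 * ib_uverbs_comp_handler() is typically invoked from the driver's CQ
 * interrupt path, hence the irqsave locking and the GFP_ATOMIC
 * allocation above: this context must not sleep.  If the allocation
 * fails, the completion event is silently dropped.
 */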
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed   = 0;
	ev_queue->async_queue = NULL;
}
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;
	int ret;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&uverbs_file->event_handler);
	if (ret)
		goto err_put_file;

	/* At this point the async event file is fully set up */

	return filp;

err_put_file:
	fput(filp);
	kref_put(&uverbs_file->async_file->ref,
		 ib_uverbs_release_async_event_file);
	uverbs_file->async_file = NULL;
	return ERR_PTR(ret);

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}
static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
{
	u64 mask;

	if (command <= IB_USER_VERBS_CMD_OPEN_QP)
		mask = ib_dev->uverbs_cmd_mask;
	else
		mask = ib_dev->uverbs_ex_cmd_mask;

	if (mask & ((u64)1 << command))
		return 0;

	return -1;
}
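/*
 * Example: a device advertising QUERY_DEVICE sets the bit
 * (u64)1 << IB_USER_VERBS_CMD_QUERY_DEVICE in uverbs_cmd_mask, so the
 * check above reduces to a single bit test.  Opcodes above
 * IB_USER_VERBS_CMD_OPEN_QP are tested against the extended mask
 * instead.
 */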
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
	__u32 command;
	__u32 flags;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
				   IB_USER_VERBS_CMD_COMMAND_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!file->ucontext &&
	    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
		ret = -EINVAL;
		goto out;
	}

	flags = (hdr.command &
		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;

	if (!flags) {
		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
		    !uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}

		if (hdr.in_words * 4 != count) {
			ret = -EINVAL;
			goto out;
		}

		ret = uverbs_cmd_table[command](file, ib_dev,
						buf + sizeof(hdr),
						hdr.in_words * 4,
						hdr.out_words * 4);
	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
		struct ib_uverbs_ex_cmd_hdr ex_hdr;
		struct ib_udata ucore;
		struct ib_udata uhw;
		size_t written_count = count;

		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
		    !uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}

		if (!file->ucontext) {
			ret = -EINVAL;
			goto out;
		}

		if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
			ret = -EINVAL;
			goto out;
		}

		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
			ret = -EFAULT;
			goto out;
		}

		count -= sizeof(hdr) + sizeof(ex_hdr);
		buf += sizeof(hdr) + sizeof(ex_hdr);

		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.cmd_hdr_reserved) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.response) {
			if (!hdr.out_words && !ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}

			if (!access_ok(VERIFY_WRITE,
				       (void __user *) (unsigned long) ex_hdr.response,
				       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (hdr.out_words || ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}
		}

		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
				       hdr.in_words * 8, hdr.out_words * 8);

		INIT_UDATA_BUF_OR_NULL(&uhw,
				       buf + ucore.inlen,
				       (unsigned long) ex_hdr.response + ucore.outlen,
				       ex_hdr.provider_in_words * 8,
				       ex_hdr.provider_out_words * 8);

		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
		if (!ret)
			ret = written_count;
	} else {
		ret = -ENOSYS;
	}

out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	return ret;
}
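/*
 * Wire format enforced by ib_uverbs_write() above: every command starts
 * with an ib_uverbs_cmd_hdr whose in_words/out_words are counted in
 * 32-bit words, and for legacy commands hdr.in_words * 4 must equal the
 * total number of bytes written, header included.  Extended commands
 * insert an ib_uverbs_ex_cmd_hdr after the base header and account for
 * the remaining payload in 8-byte words, with responses delivered to
 * the separate userspace buffer named by ex_hdr.response rather than
 * through the write buffer.
 */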
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (!file->ucontext)
		ret = -ENODEV;
	else
		ret = ib_dev->mmap(file->ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* If the IB device supports disassociating ucontexts, there is no
	 * hard dependency between the uverbs device and its low-level
	 * device module.
	 */
	module_dependent = !(ib_dev->disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	spin_lock_init(&file->idr_lock);
	idr_init(&file->idr);
	file->ucontext	 = NULL;
	file->async_file = NULL;
	kref_init(&file->ref);
	mutex_init(&file->mutex);
	mutex_init(&file->cleanup_mutex);

	filp->private_data = file;
	kobject_get(&dev->kobj);
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_device *dev = file->device;

	mutex_lock(&file->cleanup_mutex);
	if (file->ucontext) {
		ib_uverbs_cleanup_ucontext(file, file->ucontext, false);
		file->ucontext = NULL;
	}
	mutex_unlock(&file->cleanup_mutex);
	idr_destroy(&file->idr);

	mutex_lock(&file->device->lists_mutex);
	if (!file->is_closed) {
		list_del(&file->list);
		file->is_closed = 1;
	}
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);
	kobject_put(&dev->kobj);

	return 0;
}
static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	struct ib_device *ib_dev;
	int ret = -ENODEV;
	int srcu_key;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", ib_dev->name);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_dev_abi_version(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);

/*
 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
 * requesting a new major number and doubling the number of max devices we
 * support. It's stupid, but simple.
 */
static int find_overflow_devnum(void)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
					  "infiniband_verbs");
		if (ret) {
			pr_err("user_verbs: couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
	if (ret >= IB_UVERBS_MAX_DEVICES)
		return -1;

	return ret;
}
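/*
 * Overflow device numbers are biased by IB_UVERBS_MAX_DEVICES so that
 * uverbs_dev->devnum stays unique across both ranges: slot n under the
 * dynamic major becomes "uverbs<n + 32>", while slots under the static
 * major keep "uverbs0".."uverbs31" (see ib_uverbs_add_one() below).
 */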
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);

	spin_lock(&map_lock);
	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
	if (devnum >= IB_UVERBS_MAX_DEVICES) {
		spin_unlock(&map_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			goto err;

		spin_lock(&map_lock);
		uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		uverbs_dev->devnum = devnum;
		base = devnum + IB_UVERBS_BASE_DEV;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&map_lock);

	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	cdev_init(&uverbs_dev->cdev, NULL);
	uverbs_dev->cdev.owner = THIS_MODULE;
	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
	cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
	if (cdev_add(&uverbs_dev->cdev, base, 1))
		goto err_cdev;

	uverbs_dev->dev = device_create(uverbs_class, device->dev.parent,
					uverbs_dev->cdev.dev, uverbs_dev,
					"uverbs%d", uverbs_dev->devnum);
	if (IS_ERR(uverbs_dev->dev))
		goto err_cdev;

	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
		goto err_class;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);

	return;

err_class:
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
	cdev_del(&uverbs_dev->cdev);
	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
	return;
}
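/*
 * The error unwinding above runs in reverse order of setup (sysfs
 * files, device, cdev, devnum bit) and ends with the same
 * refcount-drop/wait_for_completion sequence used by
 * ib_uverbs_remove_one(), so a partially constructed device should
 * never remain visible to openers.
 */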
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Wait for any pending or running commands to terminate */
	synchronize_srcu(&uverbs_dev->disassociate_srcu);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		struct ib_ucontext *ucontext;

		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		file->is_closed = 1;
		list_del(&file->list);
		kref_get(&file->ref);
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);

		mutex_lock(&file->cleanup_mutex);
		ucontext = file->ucontext;
		file->ucontext = NULL;
		mutex_unlock(&file->cleanup_mutex);

		/* At this point ib_uverbs_close() cannot be running
		 * ib_uverbs_cleanup_ucontext(): file->ucontext was cleared
		 * under cleanup_mutex above.
		 */
		if (ucontext) {
			/* We must release the mutex before going ahead and
			 * calling disassociate_ucontext: it might end up
			 * indirectly calling uverbs_close, for example when
			 * freeing the resources (e.g. mmput).
			 */
			ib_dev->disassociate_ucontext(ucontext);
			mutex_lock(&file->cleanup_mutex);
			ib_uverbs_cleanup_ucontext(file, ucontext, true);
			mutex_unlock(&file->cleanup_mutex);
		}

		mutex_lock(&uverbs_dev->lists_mutex);
		kref_put(&file->ref, ib_uverbs_release_file);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device = NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	dev_set_drvdata(uverbs_dev->dev, NULL);
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
	cdev_del(&uverbs_dev->cdev);

	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(uverbs_dev->devnum, dev_map);
	else
		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

	if (device->disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an -EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);

out:
	return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
}
module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);