drivers/infiniband/core/rdma_core.c

/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
	struct ib_uobject *uobj =
		container_of(ref, struct ib_uobject, ref);

	if (uobj->uapi_object->type_class->needs_kfree_rcu)
		kfree_rcu(uobj, rcu);
	else
		kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	/*
	 * When shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments
	 * it. Exclusive access is required for operations like write or
	 * destroy. In exclusive access mode, we check that the counter is
	 * zero (nobody claimed this object) and we set it to -1. Releasing
	 * a shared access lock is done simply by decreasing the counter. As
	 * for exclusive access locks, since only a single one of them is
	 * allowed concurrently, setting the counter to zero is enough for
	 * releasing this lock.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* lock is exclusive */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		return 0;
	}
	return 0;
}

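/*
 * Summary of the usecnt protocol implemented above (informational):
 *
 *	usecnt == 0		unlocked
 *	usecnt == N > 0		locked by N concurrent readers
 *				(UVERBS_LOOKUP_READ)
 *	usecnt == -1		exclusively locked (UVERBS_LOOKUP_WRITE)
 *
 * UVERBS_LOOKUP_DESTROY touches no counter at all; it relies only on the
 * kref obtained by lookup_get to keep the memory alive.
 */
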
static void assert_uverbs_usecnt(struct ib_uobject *uobj,
				 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
		break;
	case UVERBS_LOOKUP_WRITE:
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * and the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the ufile's uobjects list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
				  enum rdma_remove_reason reason)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);
	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

	if (uobj->object) {
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason);
		if (ret) {
			if (ib_is_destroy_retryable(ret, reason, uobj))
				return ret;

			/* Nothing to be done, dangle the memory and move on */
			WARN(true,
			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
			     uobj->id, ret);
		}

		uobj->object = NULL;
	}

	if (reason == RDMA_REMOVE_ABORT) {
		WARN_ON(!list_empty(&uobj->list));
		WARN_ON(!uobj->context);
		uobj->uapi_object->type_class->alloc_abort(uobj);
	}

	uobj->context = NULL;

	/*
	 * For DESTROY the usecnt is held write locked, the caller is
	 * expected to unlock it and put the object when done with it. Only
	 * DESTROY can remove the IDR handle.
	 */
	if (reason != RDMA_REMOVE_DESTROY)
		atomic_set(&uobj->usecnt, 0);
	else
		uobj->uapi_object->type_class->remove_handle(uobj);

	if (!list_empty(&uobj->list)) {
		spin_lock_irqsave(&ufile->uobjects_lock, flags);
		list_del_init(&uobj->list);
		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

		/*
		 * Pairs with the get in rdma_alloc_commit_uobject(), could
		 * destroy uobj.
		 */
		uverbs_uobject_put(uobj);
	}

	/*
	 * When aborting the stack kref remains owned by the core code, and is
	 * not transferred into the type. Pairs with the get in alloc_uobj.
	 */
	if (reason == RDMA_REMOVE_ABORT)
		uverbs_uobject_put(uobj);

	return 0;
}

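/*
 * Ownership after uverbs_destroy_uobject() returns 0, per reason
 * (informational summary of the code above):
 *
 *	RDMA_REMOVE_DESTROY	uobject stays write locked; the caller's
 *				kref is retained and must be released via
 *				rdma_lookup_put_uobject()
 *	RDMA_REMOVE_ABORT	usecnt reset to 0; the stack kref taken in
 *				alloc_uobj() is consumed
 *	all other reasons	usecnt reset to 0; the list kref taken in
 *				rdma_alloc_commit_uobject() is consumed
 */
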
/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
 * version requires the caller to have already obtained a
 * LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	int ret;

	down_read(&ufile->hw_destroy_rwsem);

	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
	if (ret)
		goto out_unlock;

	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
	if (ret) {
		atomic_set(&uobj->usecnt, 0);
		goto out_unlock;
	}

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
	return ret;
}

/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * uobj_put_destroy().
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
				      u32 id,
				      const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_DESTROY);
	if (IS_ERR(uobj))
		return uobj;

	ret = uobj_destroy(uobj);
	if (ret) {
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		return ERR_PTR(ret);
	}

	return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
			   const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return 0;
}

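/*
 * Typical handler usage (sketch; handlers normally reach this through the
 * uobj_perform_destroy() wrapper macro, which resolves the
 * struct uverbs_api_object from the object type enum):
 *
 *	ret = uobj_perform_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
 *	if (ret)
 *		return ret;
 */
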
/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
				     const struct uverbs_api_object *obj)
{
	struct ib_uobject *uobj;
	struct ib_ucontext *ucontext;

	ucontext = ib_uverbs_get_ucontext_file(ufile);
	if (IS_ERR(ucontext))
		return ERR_CAST(ucontext);

	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler; the object is added
	 * to the list in the commit stage.
	 */
	uobj->ufile = ufile;
	uobj->context = ucontext;
	INIT_LIST_HEAD(&uobj->list);
	uobj->uapi_object = obj;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject.
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&uobj->ufile->idr_lock);

	/*
	 * We start with allocating an idr pointing to NULL. This represents an
	 * object which isn't initialized yet. We'll replace it later on with
	 * the real object once we commit.
	 */
	ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&uobj->ufile->idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

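/*
 * Note (informational): reserving the id with a NULL pointer means a
 * concurrent lookup_get_idr_uobject() simply sees NULL from idr_find() and
 * fails with -ENOENT until alloc_commit_idr_uobject() swaps in the real
 * pointer with idr_replace().
 */
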
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile, s64 id,
		       enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;
	unsigned long idrno = id;

	if (id < 0 || id > ULONG_MAX)
		return ERR_PTR(-EINVAL);

	rcu_read_lock();
	/* The object won't be released as we're protected by RCU */
	uobj = idr_find(&ufile->idr, idrno);
	if (!uobj) {
		uobj = ERR_PTR(-ENOENT);
		goto free;
	}

	/*
	 * The idr_find is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after idr_remove goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);

free:
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
		      struct ib_uverbs_file *ufile, s64 id,
		      enum rdma_lookup_mode mode)
{
	const struct uverbs_obj_fd_type *fd_type;
	struct file *f;
	struct ib_uobject *uobject;
	int fdno = id;

	if (fdno != id)
		return ERR_PTR(-EINVAL);

	if (mode != UVERBS_LOOKUP_READ)
		return ERR_PTR(-EOPNOTSUPP);

	if (!obj->type_attrs)
		return ERR_PTR(-EIO);
	fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

	f = fget(fdno);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running uverbs_close_fd,
	 * and the caller is expected to ensure that uverbs_close_fd is never
	 * done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
					   struct ib_uverbs_file *ufile, s64 id,
					   enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;
	int ret;

	if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG) {
		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;
	} else {
		if (IS_ERR(obj))
			return ERR_PTR(-EINVAL);

		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;

		if (uobj->uapi_object != obj) {
			ret = -EINVAL;
			goto free;
		}
	}

	/*
	 * If we have been disassociated, block every command except for
	 * DESTROY-based commands.
	 */
	if (mode != UVERBS_LOOKUP_DESTROY &&
	    !srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu)) {
		ret = -EIO;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, mode);
	if (ret)
		goto free;

	return uobj;
free:
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

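/*
 * Every successful lookup must be paired with a put of the same mode
 * (sketch of a read-side caller):
 *
 *	uobj = rdma_lookup_get_uobject(obj, ufile, id, UVERBS_LOOKUP_READ);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	...use uobj->object...
 *	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 */
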
struct ib_uobject *_uobj_get_read(enum uverbs_default_objects type,
				  u32 object_id,
				  struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
				       object_id, UVERBS_LOOKUP_READ);
	if (IS_ERR(uobj))
		return uobj;

	attrs->context = uobj->context;

	return uobj;
}

struct ib_uobject *_uobj_get_write(enum uverbs_default_objects type,
				   u32 object_id,
				   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = rdma_lookup_get_uobject(uobj_get_type(attrs, type), attrs->ufile,
				       object_id, UVERBS_LOOKUP_WRITE);
	if (IS_ERR(uobj))
		return uobj;

	attrs->context = uobj->context;

	return uobj;
}

static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
			struct ib_uverbs_file *ufile)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(ufile, obj);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto idr_remove;

	return uobj;

idr_remove:
	spin_lock(&ufile->idr_lock);
	idr_remove(&ufile->idr, uobj->id);
	spin_unlock(&ufile->idr_lock);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile)
{
	int new_fd;
	struct ib_uobject *uobj;

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(ufile, obj);
	if (IS_ERR(uobj)) {
		put_unused_fd(new_fd);
		return uobj;
	}

	uobj->id = new_fd;
	uobj->ufile = ufile;

	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
					    struct ib_uverbs_file *ufile)
{
	struct ib_uobject *ret;

	if (IS_ERR(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The hw_destroy_rwsem is held across the entire object creation and
	 * released during rdma_alloc_commit_uobject or
	 * rdma_alloc_abort_uobject.
	 */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		return ERR_PTR(-EIO);

	ret = obj->type_class->alloc_begin(obj, ufile);
	if (IS_ERR(ret)) {
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}
	return ret;
}

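/*
 * Illustrative creation flow at this layer (sketch only; handlers usually
 * go through the uobj_alloc() style wrappers):
 *
 *	uobj = rdma_alloc_begin_uobject(obj, ufile);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	...create the HW object and set uobj->object...
 *	ret = rdma_alloc_commit_uobject(uobj);	// on success
 *	rdma_alloc_abort_uobject(uobj);		// on failure
 *
 * Both commit and abort release the hw_destroy_rwsem taken above.
 */
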
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	spin_lock(&uobj->ufile->idr_lock);
	idr_remove(&uobj->ufile->idr, uobj->id);
	spin_unlock(&uobj->ufile->idr_lock);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
					       enum rdma_remove_reason why)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->uapi_object->type_attrs,
			     struct uverbs_obj_idr_type, type);
	int ret = idr_type->destroy_object(uobj, why);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object or when a retry may be called upon an error.
	 * In the rest of the cases, just remove whatever you can.
	 */
	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	if (why == RDMA_REMOVE_ABORT)
		return 0;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
	spin_lock(&uobj->ufile->idr_lock);
	idr_remove(&uobj->ufile->idr, uobj->id);
	spin_unlock(&uobj->ufile->idr_lock);
	/* Matches the kref in alloc_commit_idr_uobject */
	uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int ret = fd_type->context_closed(uobj, why);

	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	spin_lock(&ufile->idr_lock);
	/*
	 * We already allocated this IDR with a NULL object, so
	 * this shouldn't fail.
	 *
	 * NOTE: Once we set the IDR we lose ownership of our kref on uobj.
	 * It will be put by remove_handle_idr_uobject().
	 */
	WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
	spin_unlock(&ufile->idr_lock);

	return 0;
}

static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int fd = uobj->id;
	struct file *filp;

	/*
	 * The kref for uobj is moved into filp->private_data and put in
	 * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
	 * must be guaranteed to be called from the provided fops release
	 * callback.
	 */
	filp = anon_inode_getfile(fd_type->name,
				  fd_type->fops,
				  uobj,
				  fd_type->flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	uobj->object = filp;

	/* Matching put will be done in uverbs_close_fd() */
	kref_get(&uobj->ufile->ref);

	/* This shouldn't be used anymore. Use the file object instead */
	uobj->id = 0;

	/*
	 * NOTE: Once we install the file we lose ownership of our kref on
	 * uobj. It will be put by uverbs_close_fd().
	 */
	fd_install(fd, filp);

	return 0;
}

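/*
 * Note (informational): fd_install() is the point of no return. Once the
 * fd is live, userspace can invoke the fops release at any moment, so no
 * failure path may run after it; that is why it is the very last step of
 * alloc_commit_fd_uobject().
 */
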
/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 * caller can no longer assume uobj is valid. If this function fails it
 * destroys the uobject, including the attached HW object.
 */
int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	int ret;

	/* alloc_commit consumes the uobj kref */
	ret = uobj->uapi_object->type_class->alloc_commit(uobj);
	if (ret) {
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}

	/* kref is held so long as the uobj is on the uobj list. */
	uverbs_uobject_get(uobj);
	spin_lock_irq(&ufile->uobjects_lock);
	list_add(&uobj->list, &ufile->uobjects);
	spin_unlock_irq(&ufile->uobjects_lock);

	/* matches atomic_set(-1) in alloc_uobj */
	atomic_set(&uobj->usecnt, 0);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);

	return 0;
}

/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	uobj->object = NULL;
	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
				   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	struct file *filp = uobj->object;

	WARN_ON(mode != UVERBS_LOOKUP_READ);
	/* This indirectly calls uverbs_close_fd() and frees the object */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			     enum rdma_lookup_mode mode)
{
	assert_uverbs_usecnt(uobj, mode);
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		atomic_dec(&uobj->usecnt);
		break;
	case UVERBS_LOOKUP_WRITE:
		atomic_set(&uobj->usecnt, 0);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}

	/* Pairs with the kref obtained by type->lookup_get */
	uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	spin_lock_init(&ufile->idr_lock);
	idr_init(&ufile->idr);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	struct ib_uobject *entry;
	int id;

	/*
	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
	 * there are no HW objects left, however the IDR is still populated
	 * with anything that has not been cleaned up by userspace. Since the
	 * kref on ufile is 0, nothing is allowed to call lookup_get.
	 *
	 * This is an optimized equivalent to remove_handle_idr_uobject.
	 */
	idr_for_each_entry(&ufile->idr, entry, id) {
		WARN_ON(entry->object);
		uverbs_uobject_put(entry);
	}

	idr_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.destroy_hw = destroy_hw_idr_uobject,
	.remove_handle = remove_handle_idr_uobject,
	/*
	 * When we destroy an object, we first just lock it for WRITE and
	 * actually DESTROY it in the finalize stage. So, the problematic
	 * scenario is when we just started the finalize stage of the
	 * destruction (nothing was executed yet). Now, the other thread
	 * fetched the object for READ access, but it didn't lock it yet.
	 * The DESTROY thread continues and starts destroying the object.
	 * When the other thread continues - without the RCU, it would
	 * access freed memory. However, the rcu_read_lock delays the free
	 * until the rcu_read_lock of the READ operation quits. Since the
	 * exclusive lock of the object is still taken by the DESTROY flow, the
	 * READ operation will get -EBUSY and it'll just bail out.
	 */
	.needs_kfree_rcu = true,
};
EXPORT_SYMBOL(uverbs_idr_class);

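/*
 * The race described above, as a timeline (illustrative):
 *
 *	DESTROY thread			READ thread
 *	--------------			-----------
 *	lock WRITE (usecnt = -1)
 *					rcu_read_lock(); idr_find() -> uobj
 *	destroy_hw, idr_remove,
 *	kfree_rcu(uobj)
 *					kref_get_unless_zero() may succeed;
 *					uverbs_try_lock_object(READ) sees
 *					usecnt == -1 and returns -EBUSY
 *					rcu_read_unlock()
 *	actual kfree() only after the RCU grace period
 */
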
void uverbs_close_fd(struct file *f)
{
	struct ib_uobject *uobj = f->private_data;
	struct ib_uverbs_file *ufile = uobj->ufile;

	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
		/*
		 * lookup_get_fd_uobject holds the kref on the struct file any
		 * time a FD uobj is locked, which prevents this release
		 * method from being invoked. Meaning we can always get the
		 * write lock here, or we have a kernel bug.
		 */
		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE);
		up_read(&ufile->hw_destroy_rwsem);
	}

	/* Matches the get in alloc_commit_fd_uobject */
	kref_put(&ufile->ref, ib_uverbs_release_file);

	/* Pairs with the kref moved into filp->private_data in alloc_commit_fd_uobject */
	uverbs_uobject_put(uobj);
}
EXPORT_SYMBOL(uverbs_close_fd);

/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device.
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
				   enum rdma_remove_reason reason)
{
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *ib_dev = ucontext->device;

	/*
	 * If we are closing the FD then the user mmap VMAs must have
	 * already been destroyed as they hold on to the filep, otherwise
	 * they need to be zap'd.
	 */
	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
		uverbs_user_mmap_disassociate(ufile);
		if (ib_dev->ops.disassociate_ucontext)
			ib_dev->ops.disassociate_ucontext(ucontext);
	}

	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
			   RDMACG_RESOURCE_HCA_HANDLE);

	rdma_restrack_del(&ucontext->res);

	ib_dev->ops.dealloc_ucontext(ucontext);
	kfree(ucontext);

	ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
				  enum rdma_remove_reason reason)
{
	struct ib_uobject *obj, *next_obj;
	int ret = -EINVAL;

	/*
	 * This shouldn't run while executing other commands on this
	 * context. Thus, the only thing we should take care of is
	 * releasing a FD while traversing this list. The FD could be
	 * closed and released from the _release fop of this FD.
	 * In order to mitigate this, we add a lock.
	 * We take and release the lock per traversal in order to give
	 * other threads (which might still use the FDs) a chance to run.
	 */
	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
		/*
		 * If we hit this WARN_ON, that means we are
		 * racing with a lookup_get.
		 */
		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
		if (!uverbs_destroy_uobject(obj, reason))
			ret = 0;
		else
			atomic_set(&obj->usecnt, 0);
	}
	return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it. If called with
 * reason != RDMA_REMOVE_CLOSE this will not return until the destruction has
 * been completed and ufile->ucontext is NULL.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
			     enum rdma_remove_reason reason)
{
	if (reason == RDMA_REMOVE_CLOSE) {
		/*
		 * During destruction we might trigger something that
		 * synchronously calls release on any file descriptor. For
		 * this reason all paths that come from file_operations
		 * release must use try_lock. They can progress knowing that
		 * there is an ongoing uverbs_destroy_ufile_hw that will clean
		 * up the driver resources.
		 */
		if (!mutex_trylock(&ufile->ucontext_lock))
			return;

	} else {
		mutex_lock(&ufile->ucontext_lock);
	}

	down_write(&ufile->hw_destroy_rwsem);

	/*
	 * If a ucontext was never created then we can't have any uobjects to
	 * cleanup, nothing to do.
	 */
	if (!ufile->ucontext)
		goto done;

	ufile->ucontext->closing = true;
	ufile->ucontext->cleanup_retryable = true;
	while (!list_empty(&ufile->uobjects))
		if (__uverbs_cleanup_ufile(ufile, reason)) {
			/*
			 * No entry was cleaned-up successfully during this
			 * iteration.
			 */
			break;
		}

	ufile->ucontext->cleanup_retryable = false;
	if (!list_empty(&ufile->uobjects))
		__uverbs_cleanup_ufile(ufile, reason);

	ufile_destroy_ucontext(ufile, reason);

done:
	up_write(&ufile->hw_destroy_rwsem);
	mutex_unlock(&ufile->ucontext_lock);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.destroy_hw = destroy_hw_fd_uobject,
	.remove_handle = remove_handle_fd_uobject,
	.needs_kfree_rcu = false,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id,
			     struct ib_uverbs_file *ufile,
			     enum uverbs_obj_access access, s64 id)
{
	const struct uverbs_api_object *obj =
		uapi_get_object(ufile->device->uapi, object_id);

	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_READ);
	case UVERBS_ACCESS_DESTROY:
		/* Actual destruction is done inside uverbs_handle_method */
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_DESTROY);
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(obj, ufile, id,
					       UVERBS_LOOKUP_WRITE);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(obj, ufile);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

int uverbs_finalize_object(struct ib_uobject *uobj,
			   enum uverbs_obj_access access,
			   bool commit)
{
	int ret = 0;

	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (uobj)
			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			ret = rdma_alloc_commit_uobject(uobj);
		else
			rdma_alloc_abort_uobject(uobj);
		break;
	default:
		WARN_ON(true);
		ret = -EOPNOTSUPP;
	}

	return ret;
}