/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */

#include <linux/file.h>

#include <asm/uaccess.h>

#include "uverbs.h"

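/*
 * INIT_UDATA() packs the input buffer that follows the fixed-size
 * command structure and the user-space response buffer (along with
 * their lengths) into a struct ib_udata, which is then handed to the
 * low-level driver so it can consume any provider-specific data
 * appended to the common command/response structures.
 */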
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
	do { \
		(udata)->inbuf  = (void __user *) (ibuf); \
		(udata)->outbuf = (void __user *) (obuf); \
		(udata)->inlen  = (ilen); \
		(udata)->outlen = (olen); \
	} while (0)

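/*
 * ib_uverbs_get_context() creates the per-process ib_ucontext: it asks
 * the driver to allocate the context, initializes the per-context
 * object lists, creates the asynchronous event file and registers an
 * IB event handler for the device.  Only one context may be created
 * per open of the uverbs device file.
 */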
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);

	resp.num_comp_vectors = file->device->num_comp_vectors;

	filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_free;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	up(&file->mutex);

	return in_len;

err_file:
	put_unused_fd(resp.async_fd);
	fput(filp);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	up(&file->mutex);
	return ret;
}

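/*
 * Query the device's attributes and copy them, field by field, into
 * the fixed-layout response structure expected by userspace.
 */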
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = attr.node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

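/*
 * Query the attributes of a single port and marshal them into the
 * response structure.
 */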
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

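/*
 * ib_uverbs_alloc_pd() allocates a protection domain through the
 * driver, wraps it in an ib_uobject, assigns it a handle in the PD
 * idr and links it onto the context's pd_list so it can be cleaned
 * up when the context is torn down.
 */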
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

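	/*
	 * idr_pre_get() preloads memory for the idr; if that memory is
	 * consumed before idr_get_new() runs, idr_get_new() returns
	 * -EAGAIN and we simply preload and try again.
	 */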
retry:
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_pd_idr, uobj->id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd *pd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

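/*
 * ib_uverbs_reg_mr() pins the user memory region with ib_umem_get()
 * and asks the driver to register it against the given PD.  Write
 * access to the pages is requested whenever any access flag other
 * than remote read is set (see the comment above the ib_umem_get()
 * call below).
 */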
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_umem_object *obj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set. "Local write" and "remote write"
	 * obviously require write access. "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = &obj->uobject;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

retry:
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);

err_unreg:
	ib_dereg_mr(mr);

err_up:
	up(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_umem_object *memobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	down(&file->mutex);
	list_del(&memobj->uobject.list);
	up(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);
	kfree(memobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

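/*
 * Create a completion event channel: a new file descriptor backed by
 * an ib_uverbs_event_file, which CQs can later be bound to through
 * their comp_channel at create time.
 */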
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

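/*
 * ib_uverbs_create_cq() creates a completion queue, optionally bound
 * to a completion event channel, and hooks it up to the uverbs
 * completion and CQ event handlers before publishing its handle to
 * userspace.
 */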
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	if (cmd.comp_channel >= 0)
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->comp_events_reported = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &uobj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

retry:
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_destroy_cq(cq);

err:
	kfree(uobj);
	return ret;
}

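/*
 * Poll up to cmd.ne completions from the CQ and copy them back to
 * userspace in ib_uverbs_wc format.  Note that the response buffer is
 * sized (and copied) for cmd.ne entries even if fewer completions are
 * actually returned.
 */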
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp *resp;
	struct ib_cq *cq;
	struct ib_wc *wc;
	int ret = 0;
	int i;
	int rsize;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
	resp = kmalloc(rsize, GFP_KERNEL);
	if (!resp) {
		ret = -ENOMEM;
		goto out_wc;
	}

	down(&ib_uverbs_idr_mutex);
	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	resp->count = ib_poll_cq(cq, cmd.ne, wc);

	for (i = 0; i < resp->count; i++) {
		resp->wc[i].wr_id = wc[i].wr_id;
		resp->wc[i].status = wc[i].status;
		resp->wc[i].opcode = wc[i].opcode;
		resp->wc[i].vendor_err = wc[i].vendor_err;
		resp->wc[i].byte_len = wc[i].byte_len;
		resp->wc[i].imm_data = wc[i].imm_data;
		resp->wc[i].qp_num = wc[i].qp_num;
		resp->wc[i].src_qp = wc[i].src_qp;
		resp->wc[i].wc_flags = wc[i].wc_flags;
		resp->wc[i].pkey_index = wc[i].pkey_index;
		resp->wc[i].slid = wc[i].slid;
		resp->wc[i].sl = wc[i].sl;
		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
		resp->wc[i].port_num = wc[i].port_num;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	kfree(resp);

out_wc:
	kfree(wc);
	return ret ? ret : in_len;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);
	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (cq && cq->uobject->context == file->ucontext) {
		ib_req_notify_cq(cq, cmd.solicited_only ?
				 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
		ret = in_len;
	}
	up(&ib_uverbs_idr_mutex);

	return ret;
}

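/*
 * ib_uverbs_destroy_cq() destroys a CQ and reaps any of its events
 * still queued on the completion channel and on the async event file,
 * so nothing is left pointing at the freed object.  The counts of
 * events already delivered are returned to userspace.
 */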
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_cq *cq;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file;
	struct ib_uverbs_event *evt, *tmp;
	u64 user_handle;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext)
		goto out;

	user_handle = cq->uobject->user_handle;
	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
	ev_file = cq->cq_context;

	ret = ib_destroy_cq(cq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);

		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.comp_events_reported = uobj->comp_events_reported;
	resp.async_events_reported = uobj->async_events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

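/*
 * ib_uverbs_create_qp() looks up the PD, send/receive CQs and
 * (optionally) the SRQ named by the command, validates that they all
 * belong to this context, then asks the driver to create the QP and
 * publishes its idr handle and QP number to userspace.
 */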
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	if (!pd || pd->uobject->context != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	qp->device = pd->device;
	qp->pd = pd;
	qp->send_cq = attr.send_cq;
	qp->recv_cq = attr.recv_cq;
	qp->srq = attr.srq;
	qp->uobject = &uobj->uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context = attr.qp_context;
	qp->qp_type = attr.qp_type;
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_qp(qp);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

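/*
 * ib_uverbs_modify_qp() translates the flat command structure into a
 * struct ib_qp_attr (including the primary and alternate path
 * ib_ah_attr fields) and calls ib_modify_qp() with the caller's
 * attribute mask.
 */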
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	ret = ib_modify_qp(qp, attr, cmd.attr_mask);
	if (ret)
		goto out;

	ret = in_len;

out:
	up(&ib_uverbs_idr_mutex);
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_qp *qp;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_qp(qp);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

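/*
 * ib_uverbs_post_send() unmarshals a chain of work requests from the
 * user buffer: each WR header is followed by its scatter/gather list,
 * and UD, RDMA and atomic opcodes carry additional per-opcode fields.
 * The reconstructed chain is handed to the driver's post_send method;
 * on failure, resp.bad_wr tells userspace how far posting got.
 */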
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;
		next->imm_data = user_wr->imm_data;

		if (qp->qp_type == IB_QPT_UD) {
			next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
						  user_wr->wr.ud.ah);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	kfree(user_wr);

	return ret ? ret : in_len;
}

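/*
 * ib_uverbs_unmarshall_recv() is the common helper for post_recv and
 * post_srq_recv: it copies wr_count receive work requests (plus their
 * scatter/gather entries) out of the user buffer and builds a kernel
 * ib_recv_wr chain, or returns an ERR_PTR on failure.
 */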
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	down(&ib_uverbs_idr_mutex);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

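/*
 * ib_uverbs_create_ah() builds an ib_ah_attr from the command and
 * creates an address handle on the given PD via ib_create_ah().
 */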
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	uobj->user_handle = cmd.user_handle;
	uobj->context = file->ucontext;

	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	ah->uobject = uobj;

retry:
	if (!idr_pre_get(&ib_uverbs_ah_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_ah_idr, ah, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_ah_idr, uobj->id);

err_destroy:
	ib_destroy_ah(ah);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
	if (!ah || ah->uobject->context != file->ucontext)
		goto out;

	uobj = ah->uobject;

	ret = ib_destroy_ah(ah);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);

	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

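/*
 * Attach or detach a QP to/from a multicast group identified by GID
 * and MLID.  Both calls only act on QPs belonging to this context.
 */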
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

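/*
 * ib_uverbs_create_srq() creates a shared receive queue on the given
 * PD with the requested limits, assigns it an idr handle and links it
 * onto the context's srq_list.
 */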
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_srq_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);

	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.attr.max_wr = cmd.max_wr;
	attr.attr.max_sge = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_up;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->uobject = &uobj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	memset(&resp, 0, sizeof resp);

retry:
	if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.srq_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_srq(srq);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr.max_wr = cmd.max_wr;
	attr.max_sge = cmd.max_sge;
	attr.srq_limit = cmd.srq_limit;

	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_srq *srq;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	memset(&resp, 0, sizeof resp);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}