/* drivers/infiniband/core/uverbs_cmd.c */
/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

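/*
 * Translate a completion channel fd into its event file.  An extra
 * reference is taken on the uobject before the read lock is released,
 * so the returned file cannot go away under the caller, who owns that
 * reference.
 */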
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}

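/*
 * GET_CONTEXT: create the per-process ucontext, charge the HCA handle
 * against the RDMA cgroup, and return an fd for asynchronous events.
 * A uverbs file may own at most one ucontext, hence the -EINVAL check
 * under file->mutex.
 */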
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

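/* Flatten struct ib_device_attr into the fixed-layout uverbs response. */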
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

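/*
 * QUERY_PORT: mirror struct ib_port_attr into the user response.  OPA
 * ports carry extended LIDs, which are squeezed back into the 16-bit
 * IB fields via OPA_TO_IB_UCAST_LID().
 */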
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

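/* ALLOC_PD/DEALLOC_PD: protection domain creation and teardown. */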
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

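/*
 * XRCDs opened through a file descriptor are kept in a per-device
 * rbtree keyed by inode, so that several opens of the same file share
 * one ib_xrcd.
 */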
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

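/*
 * OPEN_XRCD: look up (or, with O_CREAT, create) the XRC domain backing
 * the inode of cmd.fd.  A newly allocated XRCD is inserted into the
 * inode table; reopening an existing one just bumps its use count.
 */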
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}

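/*
 * REG_MR: register a user memory region.  start and hca_va must share
 * the same page offset, and ODP registration is refused when the
 * device lacks IB_DEVICE_ON_DEMAND_PAGING.
 */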
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

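/*
 * REREG_MR: rewrite translation, access flags and/or PD of an existing
 * MR, as selected by cmd.flags.  On a PD change the use counts of the
 * old and new PDs are adjusted accordingly.
 */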
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

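/* ALLOC_MW/DEALLOC_MW: memory window creation and teardown. */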
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

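/*
 * CREATE_COMP_CHANNEL: allocate a completion event file; the fd
 * returned to user space is the id assigned to the underlying uobject.
 */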
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}

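/*
 * Common CQ creation path shared by the legacy and extended commands;
 * the cb argument writes back whichever response layout the caller
 * expects.
 */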
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

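/* RESIZE_CQ: only the new cqe count is copied back to user space. */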
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

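/*
 * POLL_CQ: drain up to cmd.ne completions one at a time, converting
 * each ib_wc into the fixed-size ib_uverbs_wc wire format.
 */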
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}

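/*
 * DESTROY_CQ: the event counts reported back come from the ucq object,
 * which is kept alive across uobj_remove_commit() by an extra
 * reference.
 */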
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

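/*
 * Common QP creation path for the legacy and extended commands.  It
 * resolves every referenced object (PD, CQs, SRQ, XRCD or RWQ
 * indirection table), validates create_flags, and drops the read
 * locks once the QP is live.
 */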
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

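/* Response writers and wrappers for the two create_qp entry points. */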
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

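/*
 * OPEN_QP: attach to an existing XRC target QP identified by qpn
 * inside the given XRCD (passed in cmd.pd_handle).
 */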
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

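/* Convert a kernel rdma_ah_attr into the uverbs destination layout. */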
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

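/*
 * Shared worker for MODIFY_QP and its extended variant: stage the
 * user-supplied attributes into a kernel ib_qp_attr and apply them
 * with the mask trimmed by modify_qp_mask().
 */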
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}

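/*
 * DESTROY_QP: as with CQs, hold an extra uobject reference so the
 * events_reported count can still be read after removal commits.
 */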
2081 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2082 struct ib_device *ib_dev,
2083 const char __user *buf, int in_len,
2084 int out_len)
2085 {
2086 struct ib_uverbs_destroy_qp cmd;
2087 struct ib_uverbs_destroy_qp_resp resp;
2088 struct ib_uobject *uobj;
2089 struct ib_uqp_object *obj;
2090 int ret = -EINVAL;
2091
2092 if (copy_from_user(&cmd, buf, sizeof cmd))
2093 return -EFAULT;
2094
2095 memset(&resp, 0, sizeof resp);
2096
2097 uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
2098 file->ucontext);
2099 if (IS_ERR(uobj))
2100 return PTR_ERR(uobj);
2101
2102 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2103 /*
2104 * Make sure we don't free the memory in remove_commit, as we still
2105 * need the uobject memory to create the response.
2106 */
2107 uverbs_uobject_get(uobj);
2108
2109 ret = uobj_remove_commit(uobj);
2110 if (ret) {
2111 uverbs_uobject_put(uobj);
2112 return ret;
2113 }
2114
2115 resp.events_reported = obj->uevent.events_reported;
2116 uverbs_uobject_put(uobj);
2117
2118 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2119 return -EFAULT;
2120
2121 return in_len;
2122 }
2123
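/*
 * Allocate a work request plus its scatter/gather array in one chunk.
 * The guard below rejects any num_sge for which
 *
 *   ALIGN(wr_size, sizeof(struct ib_sge)) + num_sge * sizeof(struct ib_sge)
 *
 * could reach U32_MAX, so the kmalloc() size stays bounded and the later
 * sg_list offset arithmetic cannot overflow; the sg_list is pointed at
 * the bytes that follow the aligned WR header.
 */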
2124 static void *alloc_wr(size_t wr_size, __u32 num_sge)
2125 {
2126 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2127 sizeof (struct ib_sge))
2128 return NULL;
2129
2130 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2131 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2132 }
2133
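/*
 * Wire layout of a POST_SEND command, as this handler consumes it:
 *
 *   struct ib_uverbs_post_send  cmd;
 *   struct ib_uverbs_send_wr    wr[cmd.wr_count];   each cmd.wqe_size bytes
 *   struct ib_uverbs_sge        sge[cmd.sge_count]; shared by all WRs
 *
 * Each WR is copied in, converted to the opcode-specific kernel WR type
 * (UD, RDMA, atomic or plain send), and linked into a chain; sg_ind walks
 * the shared SGE array so every WR takes its num_sge entries in order.
 */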
2134 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2135 struct ib_device *ib_dev,
2136 const char __user *buf, int in_len,
2137 int out_len)
2138 {
2139 struct ib_uverbs_post_send cmd;
2140 struct ib_uverbs_post_send_resp resp;
2141 struct ib_uverbs_send_wr *user_wr;
2142 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2143 struct ib_qp *qp;
2144 int i, sg_ind;
2145 int is_ud;
2146 ssize_t ret = -EINVAL;
2147 size_t next_size;
2148
2149 if (copy_from_user(&cmd, buf, sizeof cmd))
2150 return -EFAULT;
2151
2152 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2153 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2154 return -EINVAL;
2155
2156 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2157 return -EINVAL;
2158
2159 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2160 if (!user_wr)
2161 return -ENOMEM;
2162
2163 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2164 if (!qp)
2165 goto out;
2166
2167 is_ud = qp->qp_type == IB_QPT_UD;
2168 sg_ind = 0;
2169 last = NULL;
2170 for (i = 0; i < cmd.wr_count; ++i) {
2171 if (copy_from_user(user_wr,
2172 buf + sizeof cmd + i * cmd.wqe_size,
2173 cmd.wqe_size)) {
2174 ret = -EFAULT;
2175 goto out_put;
2176 }
2177
2178 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2179 ret = -EINVAL;
2180 goto out_put;
2181 }
2182
2183 if (is_ud) {
2184 struct ib_ud_wr *ud;
2185
2186 if (user_wr->opcode != IB_WR_SEND &&
2187 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2188 ret = -EINVAL;
2189 goto out_put;
2190 }
2191
2192 next_size = sizeof(*ud);
2193 ud = alloc_wr(next_size, user_wr->num_sge);
2194 if (!ud) {
2195 ret = -ENOMEM;
2196 goto out_put;
2197 }
2198
2199 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
2200 file->ucontext);
2201 if (!ud->ah) {
2202 kfree(ud);
2203 ret = -EINVAL;
2204 goto out_put;
2205 }
2206 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2207 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2208
2209 next = &ud->wr;
2210 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2211 user_wr->opcode == IB_WR_RDMA_WRITE ||
2212 user_wr->opcode == IB_WR_RDMA_READ) {
2213 struct ib_rdma_wr *rdma;
2214
2215 next_size = sizeof(*rdma);
2216 rdma = alloc_wr(next_size, user_wr->num_sge);
2217 if (!rdma) {
2218 ret = -ENOMEM;
2219 goto out_put;
2220 }
2221
2222 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2223 rdma->rkey = user_wr->wr.rdma.rkey;
2224
2225 next = &rdma->wr;
2226 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2227 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2228 struct ib_atomic_wr *atomic;
2229
2230 next_size = sizeof(*atomic);
2231 atomic = alloc_wr(next_size, user_wr->num_sge);
2232 if (!atomic) {
2233 ret = -ENOMEM;
2234 goto out_put;
2235 }
2236
2237 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2238 atomic->compare_add = user_wr->wr.atomic.compare_add;
2239 atomic->swap = user_wr->wr.atomic.swap;
2240 atomic->rkey = user_wr->wr.atomic.rkey;
2241
2242 next = &atomic->wr;
2243 } else if (user_wr->opcode == IB_WR_SEND ||
2244 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2245 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2246 next_size = sizeof(*next);
2247 next = alloc_wr(next_size, user_wr->num_sge);
2248 if (!next) {
2249 ret = -ENOMEM;
2250 goto out_put;
2251 }
2252 } else {
2253 ret = -EINVAL;
2254 goto out_put;
2255 }
2256
2257 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2258 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2259 next->ex.imm_data =
2260 (__be32 __force) user_wr->ex.imm_data;
2261 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2262 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2263 }
2264
2265 if (!last)
2266 wr = next;
2267 else
2268 last->next = next;
2269 last = next;
2270
2271 next->next = NULL;
2272 next->wr_id = user_wr->wr_id;
2273 next->num_sge = user_wr->num_sge;
2274 next->opcode = user_wr->opcode;
2275 next->send_flags = user_wr->send_flags;
2276
2277 if (next->num_sge) {
2278 next->sg_list = (void *) next +
2279 ALIGN(next_size, sizeof(struct ib_sge));
2280 if (copy_from_user(next->sg_list,
2281 buf + sizeof cmd +
2282 cmd.wr_count * cmd.wqe_size +
2283 sg_ind * sizeof (struct ib_sge),
2284 next->num_sge * sizeof (struct ib_sge))) {
2285 ret = -EFAULT;
2286 goto out_put;
2287 }
2288 sg_ind += next->num_sge;
2289 } else
2290 next->sg_list = NULL;
2291 }
2292
2293 resp.bad_wr = 0;
2294 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2295 if (ret)
2296 for (next = wr; next; next = next->next) {
2297 ++resp.bad_wr;
2298 if (next == bad_wr)
2299 break;
2300 }
2301
2302 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2303 ret = -EFAULT;
2304
2305 out_put:
2306 uobj_put_obj_read(qp);
2307
2308 while (wr) {
2309 if (is_ud && ud_wr(wr)->ah)
2310 uobj_put_obj_read(ud_wr(wr)->ah);
2311 next = wr->next;
2312 kfree(wr);
2313 wr = next;
2314 }
2315
2316 out:
2317 kfree(user_wr);
2318
2319 return ret ? ret : in_len;
2320 }
2321
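/*
 * Common unmarshalling for POST_RECV and POST_SRQ_RECV: builds a kernel
 * ib_recv_wr chain from the user buffer using the same [WRs][SGEs] layout
 * as the send path. On success the caller owns the chain and must kfree()
 * each WR after posting; on failure everything allocated here is freed
 * and an ERR_PTR is returned.
 */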
2322 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2323 int in_len,
2324 u32 wr_count,
2325 u32 sge_count,
2326 u32 wqe_size)
2327 {
2328 struct ib_uverbs_recv_wr *user_wr;
2329 struct ib_recv_wr *wr = NULL, *last, *next;
2330 int sg_ind;
2331 int i;
2332 int ret;
2333
2334 if (in_len < wqe_size * wr_count +
2335 sge_count * sizeof (struct ib_uverbs_sge))
2336 return ERR_PTR(-EINVAL);
2337
2338 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2339 return ERR_PTR(-EINVAL);
2340
2341 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2342 if (!user_wr)
2343 return ERR_PTR(-ENOMEM);
2344
2345 sg_ind = 0;
2346 last = NULL;
2347 for (i = 0; i < wr_count; ++i) {
2348 if (copy_from_user(user_wr, buf + i * wqe_size,
2349 wqe_size)) {
2350 ret = -EFAULT;
2351 goto err;
2352 }
2353
2354 if (user_wr->num_sge + sg_ind > sge_count) {
2355 ret = -EINVAL;
2356 goto err;
2357 }
2358
2359 if (user_wr->num_sge >=
2360 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2361 sizeof (struct ib_sge)) {
2362 ret = -EINVAL;
2363 goto err;
2364 }
2365
2366 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2367 user_wr->num_sge * sizeof (struct ib_sge),
2368 GFP_KERNEL);
2369 if (!next) {
2370 ret = -ENOMEM;
2371 goto err;
2372 }
2373
2374 if (!last)
2375 wr = next;
2376 else
2377 last->next = next;
2378 last = next;
2379
2380 next->next = NULL;
2381 next->wr_id = user_wr->wr_id;
2382 next->num_sge = user_wr->num_sge;
2383
2384 if (next->num_sge) {
2385 next->sg_list = (void *) next +
2386 ALIGN(sizeof *next, sizeof (struct ib_sge));
2387 if (copy_from_user(next->sg_list,
2388 buf + wr_count * wqe_size +
2389 sg_ind * sizeof (struct ib_sge),
2390 next->num_sge * sizeof (struct ib_sge))) {
2391 ret = -EFAULT;
2392 goto err;
2393 }
2394 sg_ind += next->num_sge;
2395 } else
2396 next->sg_list = NULL;
2397 }
2398
2399 kfree(user_wr);
2400 return wr;
2401
2402 err:
2403 kfree(user_wr);
2404
2405 while (wr) {
2406 next = wr->next;
2407 kfree(wr);
2408 wr = next;
2409 }
2410
2411 return ERR_PTR(ret);
2412 }
2413
2414 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2415 struct ib_device *ib_dev,
2416 const char __user *buf, int in_len,
2417 int out_len)
2418 {
2419 struct ib_uverbs_post_recv cmd;
2420 struct ib_uverbs_post_recv_resp resp;
2421 struct ib_recv_wr *wr, *next, *bad_wr;
2422 struct ib_qp *qp;
2423 ssize_t ret = -EINVAL;
2424
2425 if (copy_from_user(&cmd, buf, sizeof cmd))
2426 return -EFAULT;
2427
2428 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2429 in_len - sizeof cmd, cmd.wr_count,
2430 cmd.sge_count, cmd.wqe_size);
2431 if (IS_ERR(wr))
2432 return PTR_ERR(wr);
2433
2434 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2435 if (!qp)
2436 goto out;
2437
2438 resp.bad_wr = 0;
2439 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2440
2441 uobj_put_obj_read(qp);
2442 if (ret) {
2443 for (next = wr; next; next = next->next) {
2444 ++resp.bad_wr;
2445 if (next == bad_wr)
2446 break;
2447 }
2448 }
2449
2450 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2451 ret = -EFAULT;
2452
2453 out:
2454 while (wr) {
2455 next = wr->next;
2456 kfree(wr);
2457 wr = next;
2458 }
2459
2460 return ret ? ret : in_len;
2461 }
2462
2463 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2464 struct ib_device *ib_dev,
2465 const char __user *buf, int in_len,
2466 int out_len)
2467 {
2468 struct ib_uverbs_post_srq_recv cmd;
2469 struct ib_uverbs_post_srq_recv_resp resp;
2470 struct ib_recv_wr *wr, *next, *bad_wr;
2471 struct ib_srq *srq;
2472 ssize_t ret = -EINVAL;
2473
2474 if (copy_from_user(&cmd, buf, sizeof cmd))
2475 return -EFAULT;
2476
2477 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2478 in_len - sizeof cmd, cmd.wr_count,
2479 cmd.sge_count, cmd.wqe_size);
2480 if (IS_ERR(wr))
2481 return PTR_ERR(wr);
2482
2483 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
2484 if (!srq)
2485 goto out;
2486
2487 resp.bad_wr = 0;
2488 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2489
2490 uobj_put_obj_read(srq);
2491
2492 if (ret)
2493 for (next = wr; next; next = next->next) {
2494 ++resp.bad_wr;
2495 if (next == bad_wr)
2496 break;
2497 }
2498
2499 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2500 ret = -EFAULT;
2501
2502 out:
2503 while (wr) {
2504 next = wr->next;
2505 kfree(wr);
2506 wr = next;
2507 }
2508
2509 return ret ? ret : in_len;
2510 }
2511
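/*
 * Create an address handle from the legacy command layout. The user's
 * ib_uverbs_ah_attr is translated field by field into a struct
 * rdma_ah_attr; the GRH is only filled in when attr.is_global is set,
 * otherwise the AH flags are cleared so no stale GRH data is used.
 */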
2512 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2513 struct ib_device *ib_dev,
2514 const char __user *buf, int in_len,
2515 int out_len)
2516 {
2517 struct ib_uverbs_create_ah cmd;
2518 struct ib_uverbs_create_ah_resp resp;
2519 struct ib_uobject *uobj;
2520 struct ib_pd *pd;
2521 struct ib_ah *ah;
2522 struct rdma_ah_attr attr;
2523 int ret;
2524 struct ib_udata udata;
2525
2526 if (out_len < sizeof resp)
2527 return -ENOSPC;
2528
2529 if (copy_from_user(&cmd, buf, sizeof cmd))
2530 return -EFAULT;
2531
2532 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
2533 return -EINVAL;
2534
2535 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
2536 u64_to_user_ptr(cmd.response) + sizeof(resp),
2537 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
2538 out_len - sizeof(resp));
2539
2540 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
2541 if (IS_ERR(uobj))
2542 return PTR_ERR(uobj);
2543
2544 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
2545 if (!pd) {
2546 ret = -EINVAL;
2547 goto err;
2548 }
2549
2550 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
2551 rdma_ah_set_make_grd(&attr, false);
2552 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2553 rdma_ah_set_sl(&attr, cmd.attr.sl);
2554 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2555 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2556 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2557
2558 if (cmd.attr.is_global) {
2559 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2560 cmd.attr.grh.sgid_index,
2561 cmd.attr.grh.hop_limit,
2562 cmd.attr.grh.traffic_class);
2563 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
2564 } else {
2565 rdma_ah_set_ah_flags(&attr, 0);
2566 }
2567
2568 ah = rdma_create_user_ah(pd, &attr, &udata);
2569 if (IS_ERR(ah)) {
2570 ret = PTR_ERR(ah);
2571 goto err_put;
2572 }
2573
2574 ah->uobject = uobj;
2575 uobj->user_handle = cmd.user_handle;
2576 uobj->object = ah;
2577
2578 resp.ah_handle = uobj->id;
2579
2580 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
2581 ret = -EFAULT;
2582 goto err_copy;
2583 }
2584
2585 uobj_put_obj_read(pd);
2586 uobj_alloc_commit(uobj);
2587
2588 return in_len;
2589
2590 err_copy:
2591 rdma_destroy_ah(ah);
2592
2593 err_put:
2594 uobj_put_obj_read(pd);
2595
2596 err:
2597 uobj_alloc_abort(uobj);
2598 return ret;
2599 }
2600
2601 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2602 struct ib_device *ib_dev,
2603 const char __user *buf, int in_len, int out_len)
2604 {
2605 struct ib_uverbs_destroy_ah cmd;
2606 struct ib_uobject *uobj;
2607 int ret;
2608
2609 if (copy_from_user(&cmd, buf, sizeof cmd))
2610 return -EFAULT;
2611
2612 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
2613 file->ucontext);
2614 if (IS_ERR(uobj))
2615 return PTR_ERR(uobj);
2616
2617 ret = uobj_remove_commit(uobj);
2618 return ret ?: in_len;
2619 }
2620
2621 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2622 struct ib_device *ib_dev,
2623 const char __user *buf, int in_len,
2624 int out_len)
2625 {
2626 struct ib_uverbs_attach_mcast cmd;
2627 struct ib_qp *qp;
2628 struct ib_uqp_object *obj;
2629 struct ib_uverbs_mcast_entry *mcast;
2630 int ret;
2631
2632 if (copy_from_user(&cmd, buf, sizeof cmd))
2633 return -EFAULT;
2634
2635 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2636 if (!qp)
2637 return -EINVAL;
2638
2639 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2640
2641 mutex_lock(&obj->mcast_lock);
2642 list_for_each_entry(mcast, &obj->mcast_list, list)
2643 if (cmd.mlid == mcast->lid &&
2644 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2645 ret = 0;
2646 goto out_put;
2647 }
2648
2649 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2650 if (!mcast) {
2651 ret = -ENOMEM;
2652 goto out_put;
2653 }
2654
2655 mcast->lid = cmd.mlid;
2656 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2657
2658 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2659 if (!ret)
2660 list_add_tail(&mcast->list, &obj->mcast_list);
2661 else
2662 kfree(mcast);
2663
2664 out_put:
2665 mutex_unlock(&obj->mcast_lock);
2666 uobj_put_obj_read(qp);
2667
2668 return ret ? ret : in_len;
2669 }
2670
2671 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2672 struct ib_device *ib_dev,
2673 const char __user *buf, int in_len,
2674 int out_len)
2675 {
2676 struct ib_uverbs_detach_mcast cmd;
2677 struct ib_uqp_object *obj;
2678 struct ib_qp *qp;
2679 struct ib_uverbs_mcast_entry *mcast;
2680 int ret = -EINVAL;
2681 bool found = false;
2682
2683 if (copy_from_user(&cmd, buf, sizeof cmd))
2684 return -EFAULT;
2685
2686 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2687 if (!qp)
2688 return -EINVAL;
2689
2690 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2691 mutex_lock(&obj->mcast_lock);
2692
2693 list_for_each_entry(mcast, &obj->mcast_list, list)
2694 if (cmd.mlid == mcast->lid &&
2695 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2696 list_del(&mcast->list);
2697 kfree(mcast);
2698 found = true;
2699 break;
2700 }
2701
2702 if (!found) {
2703 ret = -EINVAL;
2704 goto out_put;
2705 }
2706
2707 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2708
2709 out_put:
2710 mutex_unlock(&obj->mcast_lock);
2711 uobj_put_obj_read(qp);
2712 return ret ? ret : in_len;
2713 }
2714
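/*
 * Flow specs arrive as a packed list of type-tagged, variable-size
 * records. Types at or above IB_FLOW_SPEC_ACTION_TAG are actions (tag,
 * drop) handled here; lower types are packet filters handled by
 * kern_spec_to_ib_spec_filter() below.
 */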
2715 static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
2716 union ib_flow_spec *ib_spec)
2717 {
2718 ib_spec->type = kern_spec->type;
2719 switch (ib_spec->type) {
2720 case IB_FLOW_SPEC_ACTION_TAG:
2721 if (kern_spec->flow_tag.size !=
2722 sizeof(struct ib_uverbs_flow_spec_action_tag))
2723 return -EINVAL;
2724
2725 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2726 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2727 break;
2728 case IB_FLOW_SPEC_ACTION_DROP:
2729 if (kern_spec->drop.size !=
2730 sizeof(struct ib_uverbs_flow_spec_action_drop))
2731 return -EINVAL;
2732
2733 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2734 break;
2735 default:
2736 return -EINVAL;
2737 }
2738 return 0;
2739 }
2740
2741 static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
2742 {
2743 /* Returns the user-space filter size, including padding */
2744 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2745 }
2746
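/*
 * A filter spec carries its header followed by two equal halves: the
 * match value and the mask. kern_spec_filter_sz() above therefore halves
 * the payload; e.g. (illustrative numbers only) a 40-byte spec with an
 * 8-byte header yields a 16-byte value and a 16-byte mask. The helper
 * below then trims that size to what the kernel structure can hold,
 * provided the excess bytes are all zero.
 */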
2747 static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
2748 u16 ib_real_filter_sz)
2749 {
2750 /*
2751 * User-space filter structures must be 64-bit aligned; otherwise this
2752 * check may pass even though we would not handle additional new attributes.
2753 */
2754
2755 if (kern_filter_size > ib_real_filter_sz) {
2756 if (memchr_inv(kern_spec_filter +
2757 ib_real_filter_sz, 0,
2758 kern_filter_size - ib_real_filter_sz))
2759 return -EINVAL;
2760 return ib_real_filter_sz;
2761 }
2762 return kern_filter_size;
2763 }
2764
2765 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2766 union ib_flow_spec *ib_spec)
2767 {
2768 ssize_t actual_filter_sz;
2769 ssize_t kern_filter_sz;
2770 ssize_t ib_filter_sz;
2771 void *kern_spec_mask;
2772 void *kern_spec_val;
2773
2774 if (kern_spec->reserved)
2775 return -EINVAL;
2776
2777 ib_spec->type = kern_spec->type;
2778
2779 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2780 /* User flow spec size must be aligned to 4 bytes */
2781 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2782 return -EINVAL;
2783
2784 kern_spec_val = (void *)kern_spec +
2785 sizeof(struct ib_uverbs_flow_spec_hdr);
2786 kern_spec_mask = kern_spec_val + kern_filter_sz;
2787 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2788 return -EINVAL;
2789
2790 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2791 case IB_FLOW_SPEC_ETH:
2792 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2793 actual_filter_sz = spec_filter_size(kern_spec_mask,
2794 kern_filter_sz,
2795 ib_filter_sz);
2796 if (actual_filter_sz <= 0)
2797 return -EINVAL;
2798 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2799 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2800 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2801 break;
2802 case IB_FLOW_SPEC_IPV4:
2803 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2804 actual_filter_sz = spec_filter_size(kern_spec_mask,
2805 kern_filter_sz,
2806 ib_filter_sz);
2807 if (actual_filter_sz <= 0)
2808 return -EINVAL;
2809 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2810 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2811 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2812 break;
2813 case IB_FLOW_SPEC_IPV6:
2814 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2815 actual_filter_sz = spec_filter_size(kern_spec_mask,
2816 kern_filter_sz,
2817 ib_filter_sz);
2818 if (actual_filter_sz <= 0)
2819 return -EINVAL;
2820 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2821 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2822 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2823
2824 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2825 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2826 return -EINVAL;
2827 break;
2828 case IB_FLOW_SPEC_TCP:
2829 case IB_FLOW_SPEC_UDP:
2830 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2831 actual_filter_sz = spec_filter_size(kern_spec_mask,
2832 kern_filter_sz,
2833 ib_filter_sz);
2834 if (actual_filter_sz <= 0)
2835 return -EINVAL;
2836 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2837 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2838 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2839 break;
2840 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2841 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2842 actual_filter_sz = spec_filter_size(kern_spec_mask,
2843 kern_filter_sz,
2844 ib_filter_sz);
2845 if (actual_filter_sz <= 0)
2846 return -EINVAL;
2847 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2848 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2849 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2850
2851 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2852 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2853 return -EINVAL;
2854 break;
2855 default:
2856 return -EINVAL;
2857 }
2858 return 0;
2859 }
2860
2861 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2862 union ib_flow_spec *ib_spec)
2863 {
2864 if (kern_spec->reserved)
2865 return -EINVAL;
2866
2867 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2868 return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
2869 else
2870 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2871 }
2872
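/*
 * Extended verbs negotiate struct sizes in both directions:
 * required_cmd_sz is computed with offsetof() up to the last field this
 * kernel must see, any input bytes the kernel does not know about must
 * be zeroed by userspace, and resp.response_length tells userspace how
 * much of the response was actually filled in.
 */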
2873 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2874 struct ib_device *ib_dev,
2875 struct ib_udata *ucore,
2876 struct ib_udata *uhw)
2877 {
2878 struct ib_uverbs_ex_create_wq cmd = {};
2879 struct ib_uverbs_ex_create_wq_resp resp = {};
2880 struct ib_uwq_object *obj;
2881 int err = 0;
2882 struct ib_cq *cq;
2883 struct ib_pd *pd;
2884 struct ib_wq *wq;
2885 struct ib_wq_init_attr wq_init_attr = {};
2886 size_t required_cmd_sz;
2887 size_t required_resp_len;
2888
2889 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2890 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2891
2892 if (ucore->inlen < required_cmd_sz)
2893 return -EINVAL;
2894
2895 if (ucore->outlen < required_resp_len)
2896 return -ENOSPC;
2897
2898 if (ucore->inlen > sizeof(cmd) &&
2899 !ib_is_udata_cleared(ucore, sizeof(cmd),
2900 ucore->inlen - sizeof(cmd)))
2901 return -EOPNOTSUPP;
2902
2903 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2904 if (err)
2905 return err;
2906
2907 if (cmd.comp_mask)
2908 return -EOPNOTSUPP;
2909
2910 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
2911 file->ucontext);
2912 if (IS_ERR(obj))
2913 return PTR_ERR(obj);
2914
2915 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
2916 if (!pd) {
2917 err = -EINVAL;
2918 goto err_uobj;
2919 }
2920
2921 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
2922 if (!cq) {
2923 err = -EINVAL;
2924 goto err_put_pd;
2925 }
2926
2927 wq_init_attr.cq = cq;
2928 wq_init_attr.max_sge = cmd.max_sge;
2929 wq_init_attr.max_wr = cmd.max_wr;
2930 wq_init_attr.wq_context = file;
2931 wq_init_attr.wq_type = cmd.wq_type;
2932 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2933 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
2934 sizeof(cmd.create_flags)))
2935 wq_init_attr.create_flags = cmd.create_flags;
2936 obj->uevent.events_reported = 0;
2937 INIT_LIST_HEAD(&obj->uevent.event_list);
2938 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2939 if (IS_ERR(wq)) {
2940 err = PTR_ERR(wq);
2941 goto err_put_cq;
2942 }
2943
2944 wq->uobject = &obj->uevent.uobject;
2945 obj->uevent.uobject.object = wq;
2946 wq->wq_type = wq_init_attr.wq_type;
2947 wq->cq = cq;
2948 wq->pd = pd;
2949 wq->device = pd->device;
2950 wq->wq_context = wq_init_attr.wq_context;
2951 atomic_set(&wq->usecnt, 0);
2952 atomic_inc(&pd->usecnt);
2953 atomic_inc(&cq->usecnt);
2956
2957 memset(&resp, 0, sizeof(resp));
2958 resp.wq_handle = obj->uevent.uobject.id;
2959 resp.max_sge = wq_init_attr.max_sge;
2960 resp.max_wr = wq_init_attr.max_wr;
2961 resp.wqn = wq->wq_num;
2962 resp.response_length = required_resp_len;
2963 err = ib_copy_to_udata(ucore,
2964 &resp, resp.response_length);
2965 if (err)
2966 goto err_copy;
2967
2968 uobj_put_obj_read(pd);
2969 uobj_put_obj_read(cq);
2970 uobj_alloc_commit(&obj->uevent.uobject);
2971 return 0;
2972
2973 err_copy:
2974 ib_destroy_wq(wq);
2975 err_put_cq:
2976 uobj_put_obj_read(cq);
2977 err_put_pd:
2978 uobj_put_obj_read(pd);
2979 err_uobj:
2980 uobj_alloc_abort(&obj->uevent.uobject);
2981
2982 return err;
2983 }
2984
2985 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
2986 struct ib_device *ib_dev,
2987 struct ib_udata *ucore,
2988 struct ib_udata *uhw)
2989 {
2990 struct ib_uverbs_ex_destroy_wq cmd = {};
2991 struct ib_uverbs_ex_destroy_wq_resp resp = {};
2992 struct ib_uobject *uobj;
2993 struct ib_uwq_object *obj;
2994 size_t required_cmd_sz;
2995 size_t required_resp_len;
2996 int ret;
2997
2998 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
2999 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3000
3001 if (ucore->inlen < required_cmd_sz)
3002 return -EINVAL;
3003
3004 if (ucore->outlen < required_resp_len)
3005 return -ENOSPC;
3006
3007 if (ucore->inlen > sizeof(cmd) &&
3008 !ib_is_udata_cleared(ucore, sizeof(cmd),
3009 ucore->inlen - sizeof(cmd)))
3010 return -EOPNOTSUPP;
3011
3012 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3013 if (ret)
3014 return ret;
3015
3016 if (cmd.comp_mask)
3017 return -EOPNOTSUPP;
3018
3019 resp.response_length = required_resp_len;
3020 uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
3021 file->ucontext);
3022 if (IS_ERR(uobj))
3023 return PTR_ERR(uobj);
3024
3025 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
3026 /*
3027 * Make sure we don't free the memory in remove_commit, as we still
3028 * need the uobject memory to create the response.
3029 */
3030 uverbs_uobject_get(uobj);
3031
3032 ret = uobj_remove_commit(uobj);
3033 resp.events_reported = obj->uevent.events_reported;
3034 uverbs_uobject_put(uobj);
3035 if (ret)
3036 return ret;
3037
3038 return ib_copy_to_udata(ucore, &resp, resp.response_length);
3039 }
3040
3041 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3042 struct ib_device *ib_dev,
3043 struct ib_udata *ucore,
3044 struct ib_udata *uhw)
3045 {
3046 struct ib_uverbs_ex_modify_wq cmd = {};
3047 struct ib_wq *wq;
3048 struct ib_wq_attr wq_attr = {};
3049 size_t required_cmd_sz;
3050 int ret;
3051
3052 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3053 if (ucore->inlen < required_cmd_sz)
3054 return -EINVAL;
3055
3056 if (ucore->inlen > sizeof(cmd) &&
3057 !ib_is_udata_cleared(ucore, sizeof(cmd),
3058 ucore->inlen - sizeof(cmd)))
3059 return -EOPNOTSUPP;
3060
3061 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3062 if (ret)
3063 return ret;
3064
3065 if (!cmd.attr_mask)
3066 return -EINVAL;
3067
3068 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3069 return -EINVAL;
3070
3071 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
3072 if (!wq)
3073 return -EINVAL;
3074
3075 wq_attr.curr_wq_state = cmd.curr_wq_state;
3076 wq_attr.wq_state = cmd.wq_state;
3077 if (cmd.attr_mask & IB_WQ_FLAGS) {
3078 wq_attr.flags = cmd.flags;
3079 wq_attr.flags_mask = cmd.flags_mask;
3080 }
3081 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
3082 uobj_put_obj_read(wq);
3083 return ret;
3084 }
3085
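/*
 * An RSS indirection table references 1 << log_ind_tbl_size work queues.
 * The handle array is one __u32 per entry but must stay u64 aligned on
 * the wire, so a single-entry table carries 4 bytes of padding. For
 * example (illustrative only), log_ind_tbl_size == 3 means 8 handles in
 * a 32-byte array; log_ind_tbl_size == 0 means 1 handle padded to 8 bytes.
 */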
3086 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3087 struct ib_device *ib_dev,
3088 struct ib_udata *ucore,
3089 struct ib_udata *uhw)
3090 {
3091 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3092 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3093 struct ib_uobject *uobj;
3094 int err = 0;
3095 struct ib_rwq_ind_table_init_attr init_attr = {};
3096 struct ib_rwq_ind_table *rwq_ind_tbl;
3097 struct ib_wq **wqs = NULL;
3098 u32 *wqs_handles = NULL;
3099 struct ib_wq *wq = NULL;
3100 int i, j, num_read_wqs;
3101 u32 num_wq_handles;
3102 u32 expected_in_size;
3103 size_t required_cmd_sz_header;
3104 size_t required_resp_len;
3105
3106 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3107 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3108
3109 if (ucore->inlen < required_cmd_sz_header)
3110 return -EINVAL;
3111
3112 if (ucore->outlen < required_resp_len)
3113 return -ENOSPC;
3114
3115 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3116 if (err)
3117 return err;
3118
3119 ucore->inbuf += required_cmd_sz_header;
3120 ucore->inlen -= required_cmd_sz_header;
3121
3122 if (cmd.comp_mask)
3123 return -EOPNOTSUPP;
3124
3125 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3126 return -EINVAL;
3127
3128 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3129 expected_in_size = num_wq_handles * sizeof(__u32);
3130 if (num_wq_handles == 1)
3131 /* input size for wq handles is u64 aligned */
3132 expected_in_size += sizeof(__u32);
3133
3134 if (ucore->inlen < expected_in_size)
3135 return -EINVAL;
3136
3137 if (ucore->inlen > expected_in_size &&
3138 !ib_is_udata_cleared(ucore, expected_in_size,
3139 ucore->inlen - expected_in_size))
3140 return -EOPNOTSUPP;
3141
3142 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3143 GFP_KERNEL);
3144 if (!wqs_handles)
3145 return -ENOMEM;
3146
3147 err = ib_copy_from_udata(wqs_handles, ucore,
3148 num_wq_handles * sizeof(__u32));
3149 if (err)
3150 goto err_free;
3151
3152 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3153 if (!wqs) {
3154 err = -ENOMEM;
3155 goto err_free;
3156 }
3157
3158 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3159 num_read_wqs++) {
3160 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
3161 file->ucontext);
3162 if (!wq) {
3163 err = -EINVAL;
3164 goto put_wqs;
3165 }
3166
3167 wqs[num_read_wqs] = wq;
3168 }
3169
3170 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
3171 if (IS_ERR(uobj)) {
3172 err = PTR_ERR(uobj);
3173 goto put_wqs;
3174 }
3175
3176 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3177 init_attr.ind_tbl = wqs;
3178 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3179
3180 if (IS_ERR(rwq_ind_tbl)) {
3181 err = PTR_ERR(rwq_ind_tbl);
3182 goto err_uobj;
3183 }
3184
3185 rwq_ind_tbl->ind_tbl = wqs;
3186 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3187 rwq_ind_tbl->uobject = uobj;
3188 uobj->object = rwq_ind_tbl;
3189 rwq_ind_tbl->device = ib_dev;
3190 atomic_set(&rwq_ind_tbl->usecnt, 0);
3191
3192 for (i = 0; i < num_wq_handles; i++)
3193 atomic_inc(&wqs[i]->usecnt);
3194
3195 resp.ind_tbl_handle = uobj->id;
3196 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3197 resp.response_length = required_resp_len;
3198
3199 err = ib_copy_to_udata(ucore,
3200 &resp, resp.response_length);
3201 if (err)
3202 goto err_copy;
3203
3204 kfree(wqs_handles);
3205
3206 for (j = 0; j < num_read_wqs; j++)
3207 uobj_put_obj_read(wqs[j]);
3208
3209 uobj_alloc_commit(uobj);
3210 return 0;
3211
3212 err_copy:
3213 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3214 err_uobj:
3215 uobj_alloc_abort(uobj);
3216 put_wqs:
3217 for (j = 0; j < num_read_wqs; j++)
3218 uobj_put_obj_read(wqs[j]);
3219 err_free:
3220 kfree(wqs_handles);
3221 kfree(wqs);
3222 return err;
3223 }
3224
3225 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3226 struct ib_device *ib_dev,
3227 struct ib_udata *ucore,
3228 struct ib_udata *uhw)
3229 {
3230 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
3231 struct ib_uobject *uobj;
3232 int ret;
3233 size_t required_cmd_sz;
3234
3235 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3236
3237 if (ucore->inlen < required_cmd_sz)
3238 return -EINVAL;
3239
3240 if (ucore->inlen > sizeof(cmd) &&
3241 !ib_is_udata_cleared(ucore, sizeof(cmd),
3242 ucore->inlen - sizeof(cmd)))
3243 return -EOPNOTSUPP;
3244
3245 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3246 if (ret)
3247 return ret;
3248
3249 if (cmd.comp_mask)
3250 return -EOPNOTSUPP;
3251
3252 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
3253 file->ucontext);
3254 if (IS_ERR(uobj))
3255 return PTR_ERR(uobj);
3256
3257 return uobj_remove_commit(uobj);
3258 }
3259
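/*
 * The flow attribute is followed by num_of_specs variable-size specs.
 * The parsing loop below decrements cmd.flow_attr.size by each consumed
 * spec; a well-formed command ends the loop with exactly zero bytes left
 * and i == num_of_specs, and anything else is rejected as -EINVAL.
 */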
3260 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3261 struct ib_device *ib_dev,
3262 struct ib_udata *ucore,
3263 struct ib_udata *uhw)
3264 {
3265 struct ib_uverbs_create_flow cmd;
3266 struct ib_uverbs_create_flow_resp resp;
3267 struct ib_uobject *uobj;
3268 struct ib_flow *flow_id;
3269 struct ib_uverbs_flow_attr *kern_flow_attr;
3270 struct ib_flow_attr *flow_attr;
3271 struct ib_qp *qp;
3272 int err = 0;
3273 void *kern_spec;
3274 void *ib_spec;
3275 int i;
3276
3277 if (ucore->inlen < sizeof(cmd))
3278 return -EINVAL;
3279
3280 if (ucore->outlen < sizeof(resp))
3281 return -ENOSPC;
3282
3283 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3284 if (err)
3285 return err;
3286
3287 ucore->inbuf += sizeof(cmd);
3288 ucore->inlen -= sizeof(cmd);
3289
3290 if (cmd.comp_mask)
3291 return -EINVAL;
3292
3293 if (!capable(CAP_NET_RAW))
3294 return -EPERM;
3295
3296 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3297 return -EINVAL;
3298
3299 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3300 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3301 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3302 return -EINVAL;
3303
3304 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3305 return -EINVAL;
3306
3307 if (cmd.flow_attr.size > ucore->inlen ||
3308 cmd.flow_attr.size >
3309 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3310 return -EINVAL;
3311
3312 if (cmd.flow_attr.reserved[0] ||
3313 cmd.flow_attr.reserved[1])
3314 return -EINVAL;
3315
3316 if (cmd.flow_attr.num_of_specs) {
3317 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3318 GFP_KERNEL);
3319 if (!kern_flow_attr)
3320 return -ENOMEM;
3321
3322 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
3323 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3324 cmd.flow_attr.size);
3325 if (err)
3326 goto err_free_attr;
3327 } else {
3328 kern_flow_attr = &cmd.flow_attr;
3329 }
3330
3331 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
3332 if (IS_ERR(uobj)) {
3333 err = PTR_ERR(uobj);
3334 goto err_free_attr;
3335 }
3336
3337 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
3338 if (!qp) {
3339 err = -EINVAL;
3340 goto err_uobj;
3341 }
3342
3343 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
3344 sizeof(union ib_flow_spec), GFP_KERNEL);
3345 if (!flow_attr) {
3346 err = -ENOMEM;
3347 goto err_put;
3348 }
3349
3350 flow_attr->type = kern_flow_attr->type;
3351 flow_attr->priority = kern_flow_attr->priority;
3352 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3353 flow_attr->port = kern_flow_attr->port;
3354 flow_attr->flags = kern_flow_attr->flags;
3355 flow_attr->size = sizeof(*flow_attr);
3356
3357 kern_spec = kern_flow_attr + 1;
3358 ib_spec = flow_attr + 1;
3359 for (i = 0; i < flow_attr->num_of_specs &&
3360 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3361 cmd.flow_attr.size >=
3362 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3363 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3364 if (err)
3365 goto err_free;
3366 flow_attr->size +=
3367 ((union ib_flow_spec *) ib_spec)->size;
3368 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3369 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3370 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3371 }
3372 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3373 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3374 i, cmd.flow_attr.size);
3375 err = -EINVAL;
3376 goto err_free;
3377 }
3378 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3379 if (IS_ERR(flow_id)) {
3380 err = PTR_ERR(flow_id);
3381 goto err_free;
3382 }
3383 flow_id->uobject = uobj;
3384 uobj->object = flow_id;
3385
3386 memset(&resp, 0, sizeof(resp));
3387 resp.flow_handle = uobj->id;
3388
3389 err = ib_copy_to_udata(ucore,
3390 &resp, sizeof(resp));
3391 if (err)
3392 goto err_copy;
3393
3394 uobj_put_obj_read(qp);
3395 uobj_alloc_commit(uobj);
3396 kfree(flow_attr);
3397 if (cmd.flow_attr.num_of_specs)
3398 kfree(kern_flow_attr);
3399 return 0;
3400 err_copy:
3401 ib_destroy_flow(flow_id);
3402 err_free:
3403 kfree(flow_attr);
3404 err_put:
3405 uobj_put_obj_read(qp);
3406 err_uobj:
3407 uobj_alloc_abort(uobj);
3408 err_free_attr:
3409 if (cmd.flow_attr.num_of_specs)
3410 kfree(kern_flow_attr);
3411 return err;
3412 }
3413
3414 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3415 struct ib_device *ib_dev,
3416 struct ib_udata *ucore,
3417 struct ib_udata *uhw)
3418 {
3419 struct ib_uverbs_destroy_flow cmd;
3420 struct ib_uobject *uobj;
3421 int ret;
3422
3423 if (ucore->inlen < sizeof(cmd))
3424 return -EINVAL;
3425
3426 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3427 if (ret)
3428 return ret;
3429
3430 if (cmd.comp_mask)
3431 return -EINVAL;
3432
3433 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
3434 file->ucontext);
3435 if (IS_ERR(uobj))
3436 return PTR_ERR(uobj);
3437
3438 ret = uobj_remove_commit(uobj);
3439 return ret;
3440 }
3441
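/*
 * Shared creation path for all SRQ flavors. Which attr.ext fields are
 * read depends on cmd->srq_type: IB_SRQT_BASIC uses none, IB_SRQT_XRC
 * takes an XRCD plus a CQ, and IB_SRQT_TM takes a CQ and the maximum
 * number of tag-matching tags.
 */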
3442 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3443 struct ib_device *ib_dev,
3444 struct ib_uverbs_create_xsrq *cmd,
3445 struct ib_udata *udata)
3446 {
3447 struct ib_uverbs_create_srq_resp resp;
3448 struct ib_usrq_object *obj;
3449 struct ib_pd *pd;
3450 struct ib_srq *srq;
3451 struct ib_uobject *uninitialized_var(xrcd_uobj);
3452 struct ib_srq_init_attr attr;
3453 int ret;
3454
3455 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
3456 file->ucontext);
3457 if (IS_ERR(obj))
3458 return PTR_ERR(obj);
3459
3460 if (cmd->srq_type == IB_SRQT_TM)
3461 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3462
3463 if (cmd->srq_type == IB_SRQT_XRC) {
3464 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
3465 file->ucontext);
3466 if (IS_ERR(xrcd_uobj)) {
3467 ret = -EINVAL;
3468 goto err;
3469 }
3470
3471 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3472 if (!attr.ext.xrc.xrcd) {
3473 ret = -EINVAL;
3474 goto err_put_xrcd;
3475 }
3476
3477 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3478 atomic_inc(&obj->uxrcd->refcnt);
3479 }
3480
3481 if (ib_srq_has_cq(cmd->srq_type)) {
3482 attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
3483 file->ucontext);
3484 if (!attr.ext.cq) {
3485 ret = -EINVAL;
3486 goto err_put_xrcd;
3487 }
3488 }
3489
3490 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
3491 if (!pd) {
3492 ret = -EINVAL;
3493 goto err_put_cq;
3494 }
3495
3496 attr.event_handler = ib_uverbs_srq_event_handler;
3497 attr.srq_context = file;
3498 attr.srq_type = cmd->srq_type;
3499 attr.attr.max_wr = cmd->max_wr;
3500 attr.attr.max_sge = cmd->max_sge;
3501 attr.attr.srq_limit = cmd->srq_limit;
3502
3503 obj->uevent.events_reported = 0;
3504 INIT_LIST_HEAD(&obj->uevent.event_list);
3505
3506 srq = pd->device->create_srq(pd, &attr, udata);
3507 if (IS_ERR(srq)) {
3508 ret = PTR_ERR(srq);
3509 goto err_put;
3510 }
3511
3512 srq->device = pd->device;
3513 srq->pd = pd;
3514 srq->srq_type = cmd->srq_type;
3515 srq->uobject = &obj->uevent.uobject;
3516 srq->event_handler = attr.event_handler;
3517 srq->srq_context = attr.srq_context;
3518
3519 if (ib_srq_has_cq(cmd->srq_type)) {
3520 srq->ext.cq = attr.ext.cq;
3521 atomic_inc(&attr.ext.cq->usecnt);
3522 }
3523
3524 if (cmd->srq_type == IB_SRQT_XRC) {
3525 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3526 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3527 }
3528
3529 atomic_inc(&pd->usecnt);
3530 atomic_set(&srq->usecnt, 0);
3531
3532 obj->uevent.uobject.object = srq;
3533 obj->uevent.uobject.user_handle = cmd->user_handle;
3534
3535 memset(&resp, 0, sizeof resp);
3536 resp.srq_handle = obj->uevent.uobject.id;
3537 resp.max_wr = attr.attr.max_wr;
3538 resp.max_sge = attr.attr.max_sge;
3539 if (cmd->srq_type == IB_SRQT_XRC)
3540 resp.srqn = srq->ext.xrc.srq_num;
3541
3542 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3543 &resp, sizeof resp)) {
3544 ret = -EFAULT;
3545 goto err_copy;
3546 }
3547
3548 if (cmd->srq_type == IB_SRQT_XRC)
3549 uobj_put_read(xrcd_uobj);
3550
3551 if (ib_srq_has_cq(cmd->srq_type))
3552 uobj_put_obj_read(attr.ext.cq);
3553
3554 uobj_put_obj_read(pd);
3555 uobj_alloc_commit(&obj->uevent.uobject);
3556
3557 return 0;
3558
3559 err_copy:
3560 ib_destroy_srq(srq);
3561
3562 err_put:
3563 uobj_put_obj_read(pd);
3564
3565 err_put_cq:
3566 if (ib_srq_has_cq(cmd->srq_type))
3567 uobj_put_obj_read(attr.ext.cq);
3568
3569 err_put_xrcd:
3570 if (cmd->srq_type == IB_SRQT_XRC) {
3571 atomic_dec(&obj->uxrcd->refcnt);
3572 uobj_put_read(xrcd_uobj);
3573 }
3574
3575 err:
3576 uobj_alloc_abort(&obj->uevent.uobject);
3577 return ret;
3578 }
3579
3580 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3581 struct ib_device *ib_dev,
3582 const char __user *buf, int in_len,
3583 int out_len)
3584 {
3585 struct ib_uverbs_create_srq cmd;
3586 struct ib_uverbs_create_xsrq xcmd;
3587 struct ib_uverbs_create_srq_resp resp;
3588 struct ib_udata udata;
3589 int ret;
3590
3591 if (out_len < sizeof resp)
3592 return -ENOSPC;
3593
3594 if (copy_from_user(&cmd, buf, sizeof cmd))
3595 return -EFAULT;
3596
3597 memset(&xcmd, 0, sizeof(xcmd));
3598 xcmd.response = cmd.response;
3599 xcmd.user_handle = cmd.user_handle;
3600 xcmd.srq_type = IB_SRQT_BASIC;
3601 xcmd.pd_handle = cmd.pd_handle;
3602 xcmd.max_wr = cmd.max_wr;
3603 xcmd.max_sge = cmd.max_sge;
3604 xcmd.srq_limit = cmd.srq_limit;
3605
3606 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3607 u64_to_user_ptr(cmd.response) + sizeof(resp),
3608 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3609 out_len - sizeof(resp));
3610
3611 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3612 if (ret)
3613 return ret;
3614
3615 return in_len;
3616 }
3617
3618 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3619 struct ib_device *ib_dev,
3620 const char __user *buf, int in_len, int out_len)
3621 {
3622 struct ib_uverbs_create_xsrq cmd;
3623 struct ib_uverbs_create_srq_resp resp;
3624 struct ib_udata udata;
3625 int ret;
3626
3627 if (out_len < sizeof resp)
3628 return -ENOSPC;
3629
3630 if (copy_from_user(&cmd, buf, sizeof cmd))
3631 return -EFAULT;
3632
3633 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3634 u64_to_user_ptr(cmd.response) + sizeof(resp),
3635 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3636 out_len - sizeof(resp));
3637
3638 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3639 if (ret)
3640 return ret;
3641
3642 return in_len;
3643 }
3644
3645 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3646 struct ib_device *ib_dev,
3647 const char __user *buf, int in_len,
3648 int out_len)
3649 {
3650 struct ib_uverbs_modify_srq cmd;
3651 struct ib_udata udata;
3652 struct ib_srq *srq;
3653 struct ib_srq_attr attr;
3654 int ret;
3655
3656 if (copy_from_user(&cmd, buf, sizeof cmd))
3657 return -EFAULT;
3658
3659 ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3660 out_len);
3661
3662 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
3663 if (!srq)
3664 return -EINVAL;
3665
3666 attr.max_wr = cmd.max_wr;
3667 attr.srq_limit = cmd.srq_limit;
3668
3669 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3670
3671 uobj_put_obj_read(srq);
3672
3673 return ret ? ret : in_len;
3674 }
3675
3676 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3677 struct ib_device *ib_dev,
3678 const char __user *buf,
3679 int in_len, int out_len)
3680 {
3681 struct ib_uverbs_query_srq cmd;
3682 struct ib_uverbs_query_srq_resp resp;
3683 struct ib_srq_attr attr;
3684 struct ib_srq *srq;
3685 int ret;
3686
3687 if (out_len < sizeof resp)
3688 return -ENOSPC;
3689
3690 if (copy_from_user(&cmd, buf, sizeof cmd))
3691 return -EFAULT;
3692
3693 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
3694 if (!srq)
3695 return -EINVAL;
3696
3697 ret = ib_query_srq(srq, &attr);
3698
3699 uobj_put_obj_read(srq);
3700
3701 if (ret)
3702 return ret;
3703
3704 memset(&resp, 0, sizeof resp);
3705
3706 resp.max_wr = attr.max_wr;
3707 resp.max_sge = attr.max_sge;
3708 resp.srq_limit = attr.srq_limit;
3709
3710 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
3711 return -EFAULT;
3712
3713 return in_len;
3714 }
3715
3716 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3717 struct ib_device *ib_dev,
3718 const char __user *buf, int in_len,
3719 int out_len)
3720 {
3721 struct ib_uverbs_destroy_srq cmd;
3722 struct ib_uverbs_destroy_srq_resp resp;
3723 struct ib_uobject *uobj;
3724 struct ib_uevent_object *obj;
3725 int ret = -EINVAL;
3726
3727 if (copy_from_user(&cmd, buf, sizeof cmd))
3728 return -EFAULT;
3729
3730 uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
3731 file->ucontext);
3732 if (IS_ERR(uobj))
3733 return PTR_ERR(uobj);
3734
3735 obj = container_of(uobj, struct ib_uevent_object, uobject);
3736 /*
3737 * Make sure we don't free the memory in remove_commit, as we still
3738 * need the uobject memory to create the response.
3739 */
3740 uverbs_uobject_get(uobj);
3741
3742 memset(&resp, 0, sizeof(resp));
3743
3744 ret = uobj_remove_commit(uobj);
3745 if (ret) {
3746 uverbs_uobject_put(uobj);
3747 return ret;
3748 }
3749 resp.events_reported = obj->events_reported;
3750 uverbs_uobject_put(uobj);
3751 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
3752 return -EFAULT;
3753
3754 return in_len;
3755 }
3756
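/*
 * Capabilities are appended to the response one block at a time, and
 * resp.response_length grows only when ucore->outlen has room for the
 * next block. An older userspace with a short output buffer simply
 * receives the prefix it asked for and never sees fields it cannot parse.
 */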
3757 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3758 struct ib_device *ib_dev,
3759 struct ib_udata *ucore,
3760 struct ib_udata *uhw)
3761 {
3762 struct ib_uverbs_ex_query_device_resp resp = { {0} };
3763 struct ib_uverbs_ex_query_device cmd;
3764 struct ib_device_attr attr = {0};
3765 int err;
3766
3767 if (ucore->inlen < sizeof(cmd))
3768 return -EINVAL;
3769
3770 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3771 if (err)
3772 return err;
3773
3774 if (cmd.comp_mask)
3775 return -EINVAL;
3776
3777 if (cmd.reserved)
3778 return -EINVAL;
3779
3780 resp.response_length = offsetof(typeof(resp), odp_caps);
3781
3782 if (ucore->outlen < resp.response_length)
3783 return -ENOSPC;
3784
3785 err = ib_dev->query_device(ib_dev, &attr, uhw);
3786 if (err)
3787 return err;
3788
3789 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
3790
3791 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3792 goto end;
3793
3794 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3795 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3796 resp.odp_caps.per_transport_caps.rc_odp_caps =
3797 attr.odp_caps.per_transport_caps.rc_odp_caps;
3798 resp.odp_caps.per_transport_caps.uc_odp_caps =
3799 attr.odp_caps.per_transport_caps.uc_odp_caps;
3800 resp.odp_caps.per_transport_caps.ud_odp_caps =
3801 attr.odp_caps.per_transport_caps.ud_odp_caps;
3802 #endif
3803 resp.response_length += sizeof(resp.odp_caps);
3804
3805 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3806 goto end;
3807
3808 resp.timestamp_mask = attr.timestamp_mask;
3809 resp.response_length += sizeof(resp.timestamp_mask);
3810
3811 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3812 goto end;
3813
3814 resp.hca_core_clock = attr.hca_core_clock;
3815 resp.response_length += sizeof(resp.hca_core_clock);
3816
3817 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
3818 goto end;
3819
3820 resp.device_cap_flags_ex = attr.device_cap_flags;
3821 resp.response_length += sizeof(resp.device_cap_flags_ex);
3822
3823 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
3824 goto end;
3825
3826 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3827 resp.rss_caps.max_rwq_indirection_tables =
3828 attr.rss_caps.max_rwq_indirection_tables;
3829 resp.rss_caps.max_rwq_indirection_table_size =
3830 attr.rss_caps.max_rwq_indirection_table_size;
3831
3832 resp.response_length += sizeof(resp.rss_caps);
3833
3834 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
3835 goto end;
3836
3837 resp.max_wq_type_rq = attr.max_wq_type_rq;
3838 resp.response_length += sizeof(resp.max_wq_type_rq);
3839
3840 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
3841 goto end;
3842
3843 resp.raw_packet_caps = attr.raw_packet_caps;
3844 resp.response_length += sizeof(resp.raw_packet_caps);
3845
3846 if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
3847 goto end;
3848
3849 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
3850 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
3851 resp.tm_caps.max_ops = attr.tm_caps.max_ops;
3852 resp.tm_caps.max_sge = attr.tm_caps.max_sge;
3853 resp.tm_caps.flags = attr.tm_caps.flags;
3854 resp.response_length += sizeof(resp.tm_caps);
3855
3856 if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
3857 goto end;
3858
3859 resp.cq_moderation_caps.max_cq_moderation_count =
3860 attr.cq_caps.max_cq_moderation_count;
3861 resp.cq_moderation_caps.max_cq_moderation_period =
3862 attr.cq_caps.max_cq_moderation_period;
3863 resp.response_length += sizeof(resp.cq_moderation_caps);
3864 end:
3865 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3866 return err;
3867 }
3868
3869 int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
3870 struct ib_device *ib_dev,
3871 struct ib_udata *ucore,
3872 struct ib_udata *uhw)
3873 {
3874 struct ib_uverbs_ex_modify_cq cmd = {};
3875 struct ib_cq *cq;
3876 size_t required_cmd_sz;
3877 int ret;
3878
3879 required_cmd_sz = offsetof(typeof(cmd), reserved) +
3880 sizeof(cmd.reserved);
3881 if (ucore->inlen < required_cmd_sz)
3882 return -EINVAL;
3883
3884 /* sanity checks */
3885 if (ucore->inlen > sizeof(cmd) &&
3886 !ib_is_udata_cleared(ucore, sizeof(cmd),
3887 ucore->inlen - sizeof(cmd)))
3888 return -EOPNOTSUPP;
3889
3890 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3891 if (ret)
3892 return ret;
3893
3894 if (!cmd.attr_mask || cmd.reserved)
3895 return -EINVAL;
3896
3897 if (cmd.attr_mask > IB_CQ_MODERATE)
3898 return -EOPNOTSUPP;
3899
3900 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
3901 if (!cq)
3902 return -EINVAL;
3903
3904 ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
3905
3906 uobj_put_obj_read(cq);
3907
3908 return ret;
3909 }