]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/infiniband/core/uverbs_cmd.c
RDMA/uverbs: fix ptr_ret.cocci warnings
[mirror_ubuntu-eoan-kernel.git] / drivers / infiniband / core / uverbs_cmd.c
CommitLineData
bc38a6ab
RD
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
f7c6a7b5 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
eb9d3cd5 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
8bdb0e86 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
bc38a6ab
RD
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
bc38a6ab
RD
34 */
35
6b73597e 36#include <linux/file.h>
70a30e16 37#include <linux/fs.h>
5a0e3ad6 38#include <linux/slab.h>
8ada2c1c 39#include <linux/sched.h>
6b73597e 40
7c0f6ba6 41#include <linux/uaccess.h>
bc38a6ab 42
fd3c7904
MB
43#include <rdma/uverbs_types.h>
44#include <rdma/uverbs_std_types.h>
45#include "rdma_core.h"
46
bc38a6ab 47#include "uverbs.h"
ed4c54e5 48#include "core_priv.h"
bc38a6ab 49
/*
 * Translate a completion-channel fd into its event file.
 *
 * On success the returned ib_uverbs_completion_event_file carries a
 * uobject reference taken via uverbs_uobject_get(); the fd's read lock
 * is dropped before returning.  On failure the ERR_PTR from
 * ufd_get_read() is propagated (cast through void * to change the
 * pointer type while preserving the encoded errno).
 */
static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, const struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
					       fd, attrs);

	if (IS_ERR(uobj))
		return (void *)uobj;

	/* Keep the uobject alive after releasing the fd read lock. */
	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	return container_of(uobj, struct ib_uverbs_completion_event_file,
			    uobj);
}
/*
 * Wrapper enforcing at compile time that the fd argument is an s32;
 * typecheck() evaluates to 1, so the multiplication does not change
 * the value passed through.
 */
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
	_ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
1e7710f3 67
/*
 * IB_USER_VERBS_CMD_GET_CONTEXT: create the per-process ucontext for a
 * uverbs file, allocate the async event fd, and publish the context.
 *
 * Serialized by file->ucontext_lock; only one ucontext may exist per
 * open file.  Returns 0 on success or a negative errno.
 */
static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs,
				 const char __user *buf, int in_len,
				 int out_len)
{
	struct ib_uverbs_file *file = attrs->ufile;
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	struct ib_device *ib_dev;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->ucontext_lock);
	/* The device may have been disassociated (hot-unplug). */
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* Only one ucontext per uverbs file. */
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	/* Charge the HCA handle against the rdma cgroup controller. */
	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &attrs->driver_udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;

	ucontext->closing = false;
	ucontext->cleanup_retryable = false;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mutex_init(&ucontext->per_mm_list_lock);
	INIT_LIST_HEAD(&ucontext->per_mm_list);
	/* Drivers without ODP must not see an invalidate callback. */
	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	/* Copy the response out before fd_install makes the fd visible. */
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	fd_install(resp.async_fd, filp);

	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to setup the ucontext have completed
	 */
	smp_store_release(&file->ucontext, ucontext);

	mutex_unlock(&file->ucontext_lock);

	return 0;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->ucontext_lock);
	return ret;
}
173
/*
 * Flatten a kernel ib_device_attr into the legacy (non-extended)
 * query_device uverbs response.  Pure field copy; the only derived
 * values are device_cap_flags (truncated to the response's 32 bits)
 * and max_sge (the smaller of the send/recv SGE limits, since the
 * legacy ABI has a single field for both).
 */
static void copy_query_dev_fields(struct ib_ucontext *ucontext,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	struct ib_device *ib_dev = ucontext->device;

	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	/* Legacy ABI only carries the low 32 capability bits. */
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= min(attr->max_send_sge, attr->max_recv_sge);
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}
221
/*
 * IB_USER_VERBS_CMD_QUERY_DEVICE: return the cached device attributes
 * of the caller's ucontext device in the legacy response layout.
 */
static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_ucontext *ucontext;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	/* Attributes were captured at device registration time. */
	copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return 0;
}
248
2f944c0f
JG
249/*
250 * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the
251 * PortInfo CapabilityMask, but was extended with unique bits.
252 */
253static u32 make_port_cap_flags(const struct ib_port_attr *attr)
254{
255 u32 res;
256
257 /* All IBA CapabilityMask bits are passed through here, except bit 26,
258 * which is overridden with IP_BASED_GIDS. This is due to a historical
259 * mistake in the implementation of IP_BASED_GIDS. Otherwise all other
260 * bits match the IBA definition across all kernel versions.
261 */
262 res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS;
263
264 if (attr->ip_gids)
265 res |= IB_UVERBS_PCF_IP_BASED_GIDS;
266
267 return res;
268}
269
/*
 * IB_USER_VERBS_CMD_QUERY_PORT: query live port attributes and convert
 * them to the uverbs wire layout, including LID conversion for OPA
 * devices and the synthesized port_cap_flags.
 */
static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* ib_query_port validates cmd.port_num for us. */
	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = make_port_cap_flags(&attr);
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;

	if (rdma_is_grh_required(ib_dev, cmd.port_num))
		resp.flags |= IB_UVERBS_QPF_GRH_REQUIRED;

	/* OPA uses 32-bit LIDs; the IB wire format only carries 16 bits. */
	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid     = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid  = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid     = ib_lid_cpu16(attr.lid);
		resp.sm_lid  = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return 0;
}
333
7106a976
JG
/*
 * IB_USER_VERBS_CMD_ALLOC_PD: allocate a protection domain, wrap it in
 * a uobject, register it with restrack, and return its handle.
 *
 * On any failure after driver allocation the PD is freed and the
 * uobject aborted, so nothing leaks to the handle table.
 */
static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Makes the handle visible to userspace. */
	return uobj_alloc_commit(uobj);

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
385
7106a976
JG
386static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs,
387 const char __user *buf, int in_len, int out_len)
bc38a6ab
RD
388{
389 struct ib_uverbs_dealloc_pd cmd;
bc38a6ab
RD
390
391 if (copy_from_user(&cmd, buf, sizeof cmd))
392 return -EFAULT;
393
7106a976 394 return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
bc38a6ab
RD
395}
396
53d0bd1e
SH
/*
 * One node of the per-device rbtree mapping backing-file inodes to the
 * XRCD shared through that file.
 */
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

/*
 * Insert an inode -> xrcd mapping into dev->xrcd_tree, keyed by inode
 * pointer value.  Takes an inode reference (igrab) on success.
 * Returns 0, -ENOMEM, or -EEXIST if the inode is already mapped.
 * Caller must hold dev->xrcd_tree_mutex.
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			    struct inode *inode,
			    struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	/* Standard rbtree descent to find the insertion point. */
	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	/* Pin the inode for as long as the mapping exists. */
	igrab(inode);
	return 0;
}
437
438static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
439 struct inode *inode)
440{
441 struct xrcd_table_entry *entry;
442 struct rb_node *p = dev->xrcd_tree.rb_node;
443
444 while (p) {
445 entry = rb_entry(p, struct xrcd_table_entry, node);
446
447 if (inode < entry->inode)
448 p = p->rb_left;
449 else if (inode > entry->inode)
450 p = p->rb_right;
451 else
452 return entry;
453 }
454
455 return NULL;
456}
457
458static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
459{
460 struct xrcd_table_entry *entry;
461
462 entry = xrcd_table_search(dev, inode);
463 if (!entry)
464 return NULL;
465
466 return entry->xrcd;
467}
468
469static void xrcd_table_delete(struct ib_uverbs_device *dev,
470 struct inode *inode)
471{
472 struct xrcd_table_entry *entry;
473
474 entry = xrcd_table_search(dev, inode);
475 if (entry) {
476 iput(inode);
477 rb_erase(&entry->node, &dev->xrcd_tree);
478 kfree(entry);
479 }
480}
481
7106a976
JG
/*
 * IB_USER_VERBS_CMD_OPEN_XRCD: open (or create) an XRC domain.
 *
 * If cmd.fd names a file, XRCDs are shared across processes through
 * that file's inode via the per-device xrcd rbtree: an existing XRCD
 * is reused (subject to O_CREAT/O_EXCL semantics mirroring open(2)),
 * otherwise a new one is allocated and registered.  xrcd->usecnt
 * counts the inode-based sharers.  All tree manipulation is under
 * xrcd_tree_mutex.
 */
static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_device *ibudev = attrs->ufile->device;
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ibudev->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(ibudev, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		/* O_EXCL forbids reusing an already-shared XRCD. */
		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
						   &ib_dev);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		/* No shared XRCD found (or anonymous open): make one. */
		xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context,
					  &attrs->driver_udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(ibudev, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		/* Each inode-based opener holds one usecnt reference. */
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return uobj_alloc_commit(&obj->uobject);

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(ibudev, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&ibudev->xrcd_tree_mutex);

	return ret;
}
597
7106a976
JG
598static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs,
599 const char __user *buf, int in_len, int out_len)
53d0bd1e
SH
600{
601 struct ib_uverbs_close_xrcd cmd;
53d0bd1e
SH
602
603 if (copy_from_user(&cmd, buf, sizeof cmd))
604 return -EFAULT;
605
7106a976 606 return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
53d0bd1e
SH
607}
608
/*
 * Destroy callback for XRCD uobjects.
 *
 * An inode-shared XRCD is only truly deallocated when the last sharer
 * drops it (usecnt hits zero).  If the driver refuses destruction and
 * the reason allows a retry, the usecnt reference is restored so the
 * caller can try again later; otherwise the inode mapping is removed.
 */
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;
	struct ib_uverbs_device *dev = uobject->context->ufile->device;

	inode = xrcd->inode;
	/* Other sharers remain: just drop our reference. */
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (ib_is_destroy_retryable(ret, why, uobject)) {
		/* Put the reference back; destruction may be retried. */
		atomic_inc(&xrcd->usecnt);
		return ret;
	}

	if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
633
7106a976
JG
/*
 * IB_USER_VERBS_CMD_REG_MR: register a user memory region on a PD.
 *
 * Validates that start/hca_va share a page offset and that the access
 * flags are coherent, rejects ODP registration on devices without ODP
 * support, then calls the driver's reg_user_mr.
 */
static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs,
			    const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The VA and IOVA must agree on the in-page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &attrs->driver_udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->dm	    = NULL;
	mr->uobject = uobj;
	/* The MR holds its PD alive. */
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	return uobj_alloc_commit(uobj);

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
718
7106a976
JG
/*
 * IB_USER_VERBS_CMD_REREG_MR: modify an existing MR's translation,
 * access flags and/or PD in place.
 *
 * The MR uobject is taken with a write lock so no other command can
 * use it concurrently.  On a successful PD change the usecnt of the
 * new PD is bumped before the old one is released.
 */
static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	/* A translation change needs a valid, page-offset-matched range. */
	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	/* MRs bound to device memory cannot be re-registered. */
	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
				       attrs);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
					cmd.hca_va, cmd.access_flags, pd,
					&attrs->driver_udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			/* Move the PD usecnt from the old PD to the new one. */
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = 0;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
802
7106a976
JG
803static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs,
804 const char __user *buf, int in_len, int out_len)
bc38a6ab
RD
805{
806 struct ib_uverbs_dereg_mr cmd;
bc38a6ab
RD
807
808 if (copy_from_user(&cmd, buf, sizeof cmd))
809 return -EFAULT;
810
7106a976 811 return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
bc38a6ab
RD
812}
813
7106a976
JG
/*
 * IB_USER_VERBS_CMD_ALLOC_MW: allocate a memory window on a PD and
 * return its handle and rkey.
 */
static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;
	struct ib_device *ib_dev;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	/* The MW holds its PD alive. */
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(uobj);

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
874
7106a976
JG
875static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs,
876 const char __user *buf, int in_len, int out_len)
6b52a12b
SM
877{
878 struct ib_uverbs_dealloc_mw cmd;
6b52a12b
SM
879
880 if (copy_from_user(&cmd, buf, sizeof(cmd)))
881 return -EFAULT;
882
7106a976 883 return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
6b52a12b
SM
884}
885
7106a976
JG
/*
 * IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL: create a completion event
 * channel.  The uobject id doubles as the fd returned to userspace
 * (fd-type uobjects allocate an fd as their handle).
 */
static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs,
					 const char __user *buf, int in_len,
					 int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* For fd-based uobjects the id is the file descriptor. */
	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	return uobj_alloc_commit(uobj);
}
919
/*
 * Shared worker for the plain and extended create_cq commands.
 *
 * Allocates the CQ uobject, optionally resolves a completion channel,
 * calls the driver's create_cq, then hands the response to @cb (which
 * differs between the legacy and extended ABIs) before committing the
 * uobject.  Returns the committed ucq object or an ERR_PTR.
 */
static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
				       struct ib_udata *ucore,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct uverbs_attr_bundle *attrs,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *ucore,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};
	struct ib_device *ib_dev;

	if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return obj;

	/* A negative comp_channel means "no completion channel". */
	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	/* Only the extended command carries the flags field. */
	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
			       &attrs->driver_udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	/* ABI-specific response copy-out. */
	ret = cb(attrs, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	ret = uobj_alloc_commit(&obj->uobject);
	if (ret)
		return ERR_PTR(ret);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}
1013
8313c10f 1014static int ib_uverbs_create_cq_cb(struct uverbs_attr_bundle *attrs,
565197dd
MB
1015 struct ib_ucq_object *obj,
1016 struct ib_uverbs_ex_create_cq_resp *resp,
1017 struct ib_udata *ucore, void *context)
1018{
1019 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1020 return -EFAULT;
1021
1022 return 0;
1023}
1024
7106a976
JG
/*
 * IB_USER_VERBS_CMD_CREATE_CQ: legacy create-CQ entry point.
 * Widens the legacy command into the extended layout and delegates to
 * create_cq() with the legacy response callback.
 */
static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	/* Translate the legacy command into the extended layout. */
	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	/* cmd_sz stops at comp_channel so create_cq() ignores ex-only fields. */
	obj = create_cq(attrs, &ucore, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel),
			ib_uverbs_create_cq_cb, NULL);
	return PTR_ERR_OR_ZERO(obj);
}
1055
8313c10f 1056static int ib_uverbs_ex_create_cq_cb(struct uverbs_attr_bundle *attrs,
565197dd
MB
1057 struct ib_ucq_object *obj,
1058 struct ib_uverbs_ex_create_cq_resp *resp,
1059 struct ib_udata *ucore, void *context)
1060{
1061 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1062 return -EFAULT;
1063
1064 return 0;
1065}
1066
8313c10f 1067static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs,
ef87df2c 1068 struct ib_udata *ucore)
565197dd
MB
1069{
1070 struct ib_uverbs_ex_create_cq_resp resp;
1071 struct ib_uverbs_ex_create_cq cmd;
1072 struct ib_ucq_object *obj;
1073 int err;
1074
1075 if (ucore->inlen < sizeof(cmd))
1076 return -EINVAL;
1077
1078 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1079 if (err)
1080 return err;
1081
1082 if (cmd.comp_mask)
1083 return -EINVAL;
1084
1085 if (cmd.reserved)
1086 return -EINVAL;
1087
1088 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1089 sizeof(resp.response_length)))
1090 return -ENOSPC;
1091
ef87df2c 1092 obj = create_cq(attrs, ucore, &cmd, min(ucore->inlen, sizeof(cmd)),
565197dd
MB
1093 ib_uverbs_ex_create_cq_cb, NULL);
1094
f4cd9d58 1095 return PTR_ERR_OR_ZERO(obj);
bc38a6ab
RD
1096}
1097
/*
 * Handle IB_USER_VERBS_CMD_RESIZE_CQ: resize an existing CQ via the
 * driver's resize_cq verb and report the (possibly rounded-up) new size.
 */
static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Takes a read reference on the CQ uobject; dropped at "out". */
	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
	if (ret)
		goto out;

	/* The driver may round cmd.cqe up; report the actual depth. */
	resp.cqe = cq->cqe;

	/*
	 * Deliberately sizeof resp.cqe, not sizeof resp: only the first
	 * field of the response is copied back, preserving the original
	 * ABI of this command (the trailing field is never written).
	 */
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret;
}
1127
7db20ecd
HD
1128static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
1129 struct ib_wc *wc)
7182afea
DC
1130{
1131 struct ib_uverbs_wc tmp;
1132
1133 tmp.wr_id = wc->wr_id;
1134 tmp.status = wc->status;
1135 tmp.opcode = wc->opcode;
1136 tmp.vendor_err = wc->vendor_err;
1137 tmp.byte_len = wc->byte_len;
c966ea12 1138 tmp.ex.imm_data = wc->ex.imm_data;
7182afea
DC
1139 tmp.qp_num = wc->qp->qp_num;
1140 tmp.src_qp = wc->src_qp;
1141 tmp.wc_flags = wc->wc_flags;
1142 tmp.pkey_index = wc->pkey_index;
7db20ecd 1143 if (rdma_cap_opa_ah(ib_dev, wc->port_num))
62ede777 1144 tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
7db20ecd 1145 else
62ede777 1146 tmp.slid = ib_lid_cpu16(wc->slid);
7182afea
DC
1147 tmp.sl = wc->sl;
1148 tmp.dlid_path_bits = wc->dlid_path_bits;
1149 tmp.port_num = wc->port_num;
1150 tmp.reserved = 0;
1151
1152 if (copy_to_user(dest, &tmp, sizeof tmp))
1153 return -EFAULT;
1154
1155 return 0;
1156}
1157
/*
 * Handle IB_USER_VERBS_CMD_POLL_CQ: drain up to cmd.ne completions from
 * the CQ, copying each one to user space immediately after the response
 * header location, then write the header (count) last.
 */
static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Read reference on the CQ uobject; dropped at "out_put". */
	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		/* Pull completions one at a time so each can be copied out. */
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;	/* CQ empty */

		ret = copy_wc_to_user(cq->device, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	/* Header written last so resp.count reflects what was copied. */
	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = 0;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}
1207
7106a976
JG
1208static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs,
1209 const char __user *buf, int in_len,
1210 int out_len)
67cdb40c
RD
1211{
1212 struct ib_uverbs_req_notify_cq cmd;
1213 struct ib_cq *cq;
67cdb40c
RD
1214
1215 if (copy_from_user(&cmd, buf, sizeof cmd))
1216 return -EFAULT;
1217
8313c10f 1218 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
ab108676 1219 if (!cq)
9ead190b 1220 return -EINVAL;
67cdb40c 1221
9ead190b
RD
1222 ib_req_notify_cq(cq, cmd.solicited_only ?
1223 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1224
fd3c7904 1225 uobj_put_obj_read(cq);
9ead190b 1226
7106a976 1227 return 0;
67cdb40c
RD
1228}
1229
/*
 * Handle IB_USER_VERBS_CMD_DESTROY_CQ: destroy the CQ and report how many
 * completion/async events were delivered on it over its lifetime.
 */
static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_ucq_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/*
	 * uobj_get_destroy() looks up the handle and runs the object's
	 * destroy path; after it returns no further events can be queued,
	 * so the counters read below are stable.
	 */
	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_ucq_object, uobject);
	memset(&resp, 0, sizeof(resp));
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	/*
	 * Release the uobject before copying the response: destruction is
	 * committed even if the copy to user space faults below.
	 */
	uobj_put_destroy(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return 0;
}
1257
/*
 * Common QP-creation path shared by the legacy and extended create_qp
 * commands.
 *
 * @attrs:   per-command attribute bundle (ufile, driver udata).
 * @ucore:   user command buffer, used to verify trailing bytes are zero.
 * @cmd:     extended-format command (legacy callers pre-translate).
 * @cmd_sz:  number of bytes of *cmd actually supplied by user space;
 *           gates access to fields added after the original ABI.
 * @cb:      per-caller callback that copies the response to user space.
 * @context: opaque cookie for @cb (currently always NULL).
 *
 * On success commits the new uobject and returns 0; on failure releases
 * every reference taken so far via the err_cb/err_put ladder and returns
 * a negative errno.
 */
static int create_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_udata *ucore,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct uverbs_attr_bundle *attrs,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	/* ERR_PTR sentinel distinguishes "never taken" in err_put below. */
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;
	struct ib_device *ib_dev;

	/* Raw packet QPs can sniff/inject traffic; require CAP_NET_RAW. */
	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	/* RWQ indirection table: only if the caller's ABI includes it. */
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle, attrs);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	/* Reject commands longer than we understand with non-zero tails. */
	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	/* An indirection table replaces the receive side entirely. */
	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		/* XRC target: pd_handle actually carries the XRCD handle. */
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  attrs);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			/* XRC initiators have no receive queue. */
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
							cmd->srq_handle, attrs);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(
						cq, UVERBS_OBJECT_CQ,
						cmd->recv_cq_handle, attrs);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->send_cq_handle, attrs);
		/* Same handle for both CQs: share the send CQ reference. */
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
				       attrs);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	/* create_flags was a later ABI addition; gate on cmd_sz. */
	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	/* Spoofing a source QPN is a raw-socket-like privilege. */
	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	/* Any bytes beyond the known command must be zero. */
	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		/*
		 * Non-XRC-TGT QPs created through the driver udata path need
		 * their core bookkeeping filled in here (ib_create_qp() does
		 * this itself for the XRC target case).
		 */
		qp->real_qp	  = qp;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->rwq_ind_tbl	  = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(attrs, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	/* Drop the lookup references; the QP itself now pins the objects. */
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	return uobj_alloc_commit(&obj->uevent.uobject);
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
1532
8313c10f 1533static int ib_uverbs_create_qp_cb(struct uverbs_attr_bundle *attrs,
6d8a7497
EBE
1534 struct ib_uverbs_ex_create_qp_resp *resp,
1535 struct ib_udata *ucore)
1536{
1537 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1538 return -EFAULT;
1539
1540 return 0;
1541}
1542
7106a976
JG
1543static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs,
1544 const char __user *buf, int in_len, int out_len)
6d8a7497
EBE
1545{
1546 struct ib_uverbs_create_qp cmd;
1547 struct ib_uverbs_ex_create_qp cmd_ex;
1548 struct ib_udata ucore;
6d8a7497
EBE
1549 ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
1550 int err;
1551
1552 if (out_len < resp_size)
1553 return -ENOSPC;
1554
1555 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1556 return -EFAULT;
1557
40a20339
AB
1558 ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
1559 sizeof(cmd), resp_size);
6d8a7497
EBE
1560
1561 memset(&cmd_ex, 0, sizeof(cmd_ex));
1562 cmd_ex.user_handle = cmd.user_handle;
1563 cmd_ex.pd_handle = cmd.pd_handle;
1564 cmd_ex.send_cq_handle = cmd.send_cq_handle;
1565 cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1566 cmd_ex.srq_handle = cmd.srq_handle;
1567 cmd_ex.max_send_wr = cmd.max_send_wr;
1568 cmd_ex.max_recv_wr = cmd.max_recv_wr;
1569 cmd_ex.max_send_sge = cmd.max_send_sge;
1570 cmd_ex.max_recv_sge = cmd.max_recv_sge;
1571 cmd_ex.max_inline_data = cmd.max_inline_data;
1572 cmd_ex.sq_sig_all = cmd.sq_sig_all;
1573 cmd_ex.qp_type = cmd.qp_type;
1574 cmd_ex.is_srq = cmd.is_srq;
1575
ef87df2c
JG
1576 err = create_qp(attrs, &ucore, &cmd_ex,
1577 offsetof(typeof(cmd_ex), is_srq) + sizeof(cmd.is_srq),
1578 ib_uverbs_create_qp_cb, NULL);
6d8a7497
EBE
1579
1580 if (err)
1581 return err;
1582
7106a976 1583 return 0;
6d8a7497
EBE
1584}
1585
8313c10f 1586static int ib_uverbs_ex_create_qp_cb(struct uverbs_attr_bundle *attrs,
6d8a7497
EBE
1587 struct ib_uverbs_ex_create_qp_resp *resp,
1588 struct ib_udata *ucore)
1589{
1590 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1591 return -EFAULT;
1592
1593 return 0;
1594}
1595
8313c10f 1596static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs,
ef87df2c 1597 struct ib_udata *ucore)
6d8a7497
EBE
1598{
1599 struct ib_uverbs_ex_create_qp_resp resp;
1600 struct ib_uverbs_ex_create_qp cmd = {0};
1601 int err;
1602
1603 if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
1604 sizeof(cmd.comp_mask)))
1605 return -EINVAL;
1606
1607 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
1608 if (err)
1609 return err;
1610
c70285f8 1611 if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
6d8a7497
EBE
1612 return -EINVAL;
1613
1614 if (cmd.reserved)
1615 return -EINVAL;
1616
1617 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1618 sizeof(resp.response_length)))
1619 return -ENOSPC;
1620
ef87df2c 1621 err = create_qp(attrs, ucore, &cmd,
6d8a7497
EBE
1622 min(ucore->inlen, sizeof(cmd)),
1623 ib_uverbs_ex_create_qp_cb, NULL);
1624
1625 if (err)
1626 return err;
1627
1628 return 0;
1629}
1630
/*
 * Handle IB_USER_VERBS_CMD_OPEN_QP: attach to an existing shareable
 * (XRC target) QP identified by qpn within an XRC domain, producing a
 * new uobject/handle for this process.
 */
static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* For open_qp the pd_handle field carries the XRCD handle. */
	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = attrs->ufile;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	/* Success: pin the XRC domain for the lifetime of this handle. */
	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	return uobj_alloc_commit(&obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
1709
89caa053
PP
1710static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1711 struct rdma_ah_attr *rdma_attr)
1712{
1713 const struct ib_global_route *grh;
1714
1715 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
1716 uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
1717 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
1718 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
1719 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
1720 IB_AH_GRH);
1721 if (uverb_attr->is_global) {
1722 grh = rdma_ah_read_grh(rdma_attr);
1723 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1724 uverb_attr->flow_label = grh->flow_label;
1725 uverb_attr->sgid_index = grh->sgid_index;
1726 uverb_attr->hop_limit = grh->hop_limit;
1727 uverb_attr->traffic_class = grh->traffic_class;
1728 }
1729 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
1730}
1731
/*
 * Handle IB_USER_VERBS_CMD_QUERY_QP: query the QP's current attributes
 * and init attributes from the driver and marshal them into the uverbs
 * response layout.
 */
static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Heap-allocated: the two attr structs are large for the stack. */
	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;	/* kfree(NULL) below is harmless */
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* The snapshot is complete; release the QP before marshalling. */
	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state		    = attr->qp_state;
	resp.cur_qp_state	    = attr->cur_qp_state;
	resp.path_mtu		    = attr->path_mtu;
	resp.path_mig_state	    = attr->path_mig_state;
	resp.qkey		    = attr->qkey;
	resp.rq_psn		    = attr->rq_psn;
	resp.sq_psn		    = attr->sq_psn;
	resp.dest_qp_num	    = attr->dest_qp_num;
	resp.qp_access_flags	    = attr->qp_access_flags;
	resp.pkey_index		    = attr->pkey_index;
	resp.alt_pkey_index	    = attr->alt_pkey_index;
	resp.sq_draining	    = attr->sq_draining;
	resp.max_rd_atomic	    = attr->max_rd_atomic;
	resp.max_dest_rd_atomic	    = attr->max_dest_rd_atomic;
	resp.min_rnr_timer	    = attr->min_rnr_timer;
	resp.port_num		    = attr->port_num;
	resp.timeout		    = attr->timeout;
	resp.retry_cnt		    = attr->retry_cnt;
	resp.rnr_retry		    = attr->rnr_retry;
	resp.alt_port_num	    = attr->alt_port_num;
	resp.alt_timeout	    = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr	    = init_attr->cap.max_send_wr;
	resp.max_recv_wr	    = init_attr->cap.max_recv_wr;
	resp.max_send_sge	    = init_attr->cap.max_send_sge;
	resp.max_recv_sge	    = init_attr->cap.max_recv_sge;
	resp.max_inline_data	    = init_attr->cap.max_inline_data;
	resp.sq_sig_all		    = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret;
}
1808
9977f4f6
SH
1809/* Remove ignored fields set in the attribute mask */
1810static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1811{
1812 switch (qp_type) {
1813 case IB_QPT_XRC_INI:
1814 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
b93f3c18
SH
1815 case IB_QPT_XRC_TGT:
1816 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1817 IB_QP_RNR_RETRY);
9977f4f6
SH
1818 default:
1819 return mask;
1820 }
1821}
1822
89caa053
PP
1823static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1824 struct rdma_ah_attr *rdma_attr,
1825 struct ib_uverbs_qp_dest *uverb_attr)
1826{
1827 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1828 if (uverb_attr->is_global) {
1829 rdma_ah_set_grh(rdma_attr, NULL,
1830 uverb_attr->flow_label,
1831 uverb_attr->sgid_index,
1832 uverb_attr->hop_limit,
1833 uverb_attr->traffic_class);
1834 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1835 } else {
1836 rdma_ah_set_ah_flags(rdma_attr, 0);
1837 }
1838 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1839 rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1840 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1841 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1842 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1843 rdma_ah_set_make_grd(rdma_attr, false);
1844}
1845
/*
 * Common modify-QP path shared by the legacy and extended modify_qp
 * commands: validate the port/state combinations the hardware cannot
 * be trusted to reject, build an ib_qp_attr from the command, and hand
 * it to ib_modify_qp_with_udata().
 */
static int modify_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_uverbs_ex_modify_qp *cmd)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	/* Heap-allocated and zeroed: attr is large and sparsely filled. */
	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
			       attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV)) {
		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
			ret = -EINVAL;
			goto release_qp;
		}

		if (cmd->base.attr_mask & IB_QP_STATE &&
		    cmd->base.qp_state == IB_QPS_RTR) {
		/* We are in INIT->RTR TRANSITION (if we are not,
		 * this transition will be rejected in subsequent checks).
		 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
		 * but the IB_QP_STATE flag is required.
		 *
		 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
		 * when IB_QP_AV is set, has required inclusion of a valid
		 * port number in the primary AV. (AVs are created and handled
		 * differently for infiniband and ethernet (RoCE) ports).
		 *
		 * Check the port number included in the primary AV against
		 * the port number in the qp struct, which was set (and saved)
		 * in the RST->INIT transition.
		 */
			if (cmd->base.dest.port_num != qp->real_qp->port) {
				ret = -EINVAL;
				goto release_qp;
			}
		} else {
			/* We are in SQD->SQD. (If we are not, this transition will
			 * be rejected later in the verbs layer checks).
			 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
			 * together in the SQD->SQD transition.
			 *
			 * If only IP_QP_AV was set, add in IB_QP_PORT as well (the
			 * verbs layer driver does not track primary port changes
			 * resulting from path migration. Thus, in SQD, if the primary
			 * AV is modified, the primary port should also be modified).
			 *
			 * Note that in this transition, the IB_QP_STATE flag
			 * is not allowed.
			 */
			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == (IB_QP_AV | IB_QP_PORT)) &&
			    cmd->base.port_num != cmd->base.dest.port_num) {
				ret = -EINVAL;
				goto release_qp;
			}
			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == IB_QP_AV) {
				cmd->base.attr_mask |= IB_QP_PORT;
				cmd->base.port_num = cmd->base.dest.port_num;
			}
		}
	}

	/* Alternate path: both ports must be valid and must agree. */
	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	/* Reject out-of-range state values before they reach the driver. */
	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    (cmd->base.attr_mask & IB_QP_STATE &&
	    cmd->base.qp_state > IB_QPS_ERR)) {
		ret = -EINVAL;
		goto release_qp;
	}

	/* Copy only the fields whose mask bit the caller set. */
	if (cmd->base.attr_mask & IB_QP_STATE)
		attr->qp_state = cmd->base.qp_state;
	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
		attr->cur_qp_state = cmd->base.cur_qp_state;
	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
		attr->path_mtu = cmd->base.path_mtu;
	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
		attr->path_mig_state = cmd->base.path_mig_state;
	if (cmd->base.attr_mask & IB_QP_QKEY)
		attr->qkey = cmd->base.qkey;
	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
		attr->rq_psn = cmd->base.rq_psn;
	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
		attr->sq_psn = cmd->base.sq_psn;
	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
		attr->dest_qp_num = cmd->base.dest_qp_num;
	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
		attr->qp_access_flags = cmd->base.qp_access_flags;
	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
		attr->pkey_index = cmd->base.pkey_index;
	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		attr->max_rd_atomic = cmd->base.max_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
		attr->min_rnr_timer = cmd->base.min_rnr_timer;
	if (cmd->base.attr_mask & IB_QP_PORT)
		attr->port_num = cmd->base.port_num;
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
		attr->retry_cnt = cmd->base.retry_cnt;
	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
		attr->rnr_retry = cmd->base.rnr_retry;
	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
		attr->alt_port_num = cmd->base.alt_port_num;
		attr->alt_timeout = cmd->base.alt_timeout;
		attr->alt_pkey_index = cmd->base.alt_pkey_index;
	}
	/* rate_limit lives outside base: extended-command field only. */
	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      &attrs->driver_udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}
2004
7106a976
JG
2005static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs,
2006 const char __user *buf, int in_len, int out_len)
189aba99
BW
2007{
2008 struct ib_uverbs_ex_modify_qp cmd = {};
189aba99
BW
2009
2010 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
2011 return -EFAULT;
2012
2013 if (cmd.base.attr_mask &
2014 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
2015 return -EOPNOTSUPP;
2016
ef87df2c 2017 return modify_qp(attrs, &cmd);
189aba99
BW
2018}
2019
8313c10f 2020static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs,
ef87df2c 2021 struct ib_udata *ucore)
189aba99
BW
2022{
2023 struct ib_uverbs_ex_modify_qp cmd = {};
2024 int ret;
2025
2026 /*
2027 * Last bit is reserved for extending the attr_mask by
2028 * using another field.
2029 */
2030 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
2031
2032 if (ucore->inlen < sizeof(cmd.base))
2033 return -EINVAL;
2034
2035 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2036 if (ret)
2037 return ret;
2038
2039 if (cmd.base.attr_mask &
2040 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
2041 return -EOPNOTSUPP;
2042
2043 if (ucore->inlen > sizeof(cmd)) {
05d14e7b
MS
2044 if (!ib_is_udata_cleared(ucore, sizeof(cmd),
2045 ucore->inlen - sizeof(cmd)))
189aba99
BW
2046 return -EOPNOTSUPP;
2047 }
2048
ef87df2c 2049 ret = modify_qp(attrs, &cmd);
189aba99
BW
2050
2051 return ret;
2052}
2053
7106a976
JG
/*
 * Destroy a QP identified by its user handle and report back to user
 * space how many asynchronous events were delivered on it.
 */
static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs,
				const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Look up the uobject and begin its destruction. */
	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	/* Snapshot the event count before the uobject is released. */
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return 0;
}
2080
e622f2f4
CH
2081static void *alloc_wr(size_t wr_size, __u32 num_sge)
2082{
4f7f4dcf
VT
2083 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2084 sizeof (struct ib_sge))
2085 return NULL;
2086
e622f2f4
CH
2087 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2088 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
4f7f4dcf 2089}
e622f2f4 2090
7106a976
JG
/*
 * POST_SEND: unmarshall an array of user-space send work requests into
 * kernel ib_send_wr structures (typed per opcode: UD / RDMA / atomic /
 * plain send), post them on the QP, and report in resp.bad_wr how many
 * WRs were consumed up to and including the first failed one.
 */
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next;
	const struct ib_send_wr *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Input must hold the command, all WQEs and all SGEs. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused for each user WQE copied in. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		goto out;

	/* UD QPs only accept SEND/SEND_WITH_IMM and need an AH per WR. */
	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			/* Read ref on the AH; dropped in the unwind loop. */
			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah, attrs);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		/* Link the new WR onto the tail of the chain. */
		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			/* SGE array was allocated right after the WR. */
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the failing one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	/* Free the chain; for UD WRs also drop the AH read reference. */
	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret;
}
2277
/*
 * Unmarshall a user-space array of receive work requests (plus their
 * scatter/gather entries) into a linked list of kernel ib_recv_wr.
 * Each node is allocated with its SGE array appended.  Returns the
 * list head or an ERR_PTR; on success the caller owns the list and
 * frees each node with kfree().
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	/* Input must hold all WQEs plus all scatter/gather entries. */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused for each user WQE copied in. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* Guard the allocation-size arithmetic against overflow. */
		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		/* Link the new WR onto the tail of the chain. */
		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			/* SGE array lives right after the aligned WR. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
2369
7106a976
JG
/*
 * POST_RECV: unmarshall user receive WRs, post them on the QP, and
 * report in resp.bad_wr how many WRs were consumed up to and including
 * the first failed one.
 */
static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next;
	const struct ib_recv_wr *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		/* Count WRs up to and including the failing one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* Free the unmarshalled WR chain regardless of outcome. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}
2417
7106a976
JG
/*
 * POST_SRQ_RECV: like ib_uverbs_post_recv() but posts the unmarshalled
 * receive WRs on a shared receive queue instead of a QP.
 */
static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs,
				   const char __user *buf, int in_len,
				   int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next;
	const struct ib_recv_wr *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		/* Count WRs up to and including the failing one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* Free the unmarshalled WR chain regardless of outcome. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}
2466
7106a976
JG
/*
 * CREATE_AH: build an rdma_ah_attr from the user-supplied address
 * fields (including the optional GRH for global routing), create the
 * address handle on the given PD, and return its handle to user space.
 */
static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs,
			       const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct rdma_ah_attr attr = {};
	int ret;
	struct ib_device *ib_dev;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Reserve the handle slot before creating the AH. */
	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
		ret = -EINVAL;
		goto err;
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		/* Non-global address: make sure the GRH flag is clear. */
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(uobj);

err_copy:
	/* Response copy failed: tear down the freshly-created AH. */
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
2548
7106a976
JG
2549static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs,
2550 const char __user *buf, int in_len, int out_len)
67cdb40c
RD
2551{
2552 struct ib_uverbs_destroy_ah cmd;
67cdb40c
RD
2553
2554 if (copy_from_user(&cmd, buf, sizeof cmd))
2555 return -EFAULT;
2556
7106a976 2557 return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
67cdb40c
RD
2558}
2559
7106a976
JG
/*
 * ATTACH_MCAST: attach a QP to a multicast group and remember the
 * (gid, mlid) pair on the QP's uobject so it can be detached on QP
 * destruction.  Attaching an already-attached pair succeeds as a
 * no-op.
 */
static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* mcast_lock serializes concurrent attach/detach on this QP. */
	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			/* Already attached: succeed without re-attaching. */
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret;
}
2608
7106a976
JG
/*
 * DETACH_MCAST: remove a (gid, mlid) attachment previously recorded by
 * ib_uverbs_attach_mcast() and detach the QP from the multicast group.
 * Fails with -EINVAL if the pair was never attached.
 */
static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs,
				  const char __user *buf, int in_len,
				  int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;
	bool found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	/* mcast_lock serializes concurrent attach/detach on this QP. */
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		/* Nothing recorded for this (gid, mlid); refuse to detach. */
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret;
}
f520ba5a 2651
fa76d24e 2652struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
9b828441
MB
2653{
2654 struct ib_uflow_resources *resources;
2655
b6ba4a9a 2656 resources = kzalloc(sizeof(*resources), GFP_KERNEL);
9b828441
MB
2657
2658 if (!resources)
de749814 2659 return NULL;
b6ba4a9a 2660
a5cc9831
LR
2661 if (!num_specs)
2662 goto out;
2663
b6ba4a9a
RS
2664 resources->counters =
2665 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
b6ba4a9a
RS
2666 resources->collection =
2667 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2668
de749814
LR
2669 if (!resources->counters || !resources->collection)
2670 goto err;
9b828441 2671
a5cc9831 2672out:
9b828441 2673 resources->max = num_specs;
9b828441 2674 return resources;
b6ba4a9a 2675
de749814 2676err:
b6ba4a9a 2677 kfree(resources->counters);
b6ba4a9a 2678 kfree(resources);
de749814 2679
b6ba4a9a 2680 return NULL;
9b828441 2681}
fa76d24e 2682EXPORT_SYMBOL(flow_resources_alloc);
9b828441
MB
2683
/*
 * Release a resource tracker allocated by flow_resources_alloc():
 * drop the usecnt reference flow_resources_add() took on each recorded
 * object, then free the arrays and the tracker itself.  NULL is a
 * no-op.
 */
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	if (!uflow_res)
		return;

	for (i = 0; i < uflow_res->collection_num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	for (i = 0; i < uflow_res->counters_num; i++)
		atomic_dec(&uflow_res->counters[i]->usecnt);

	kfree(uflow_res->collection);
	kfree(uflow_res->counters);
	kfree(uflow_res);
}
EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
9b828441 2702
fa76d24e
MB
/*
 * Record a reference to a flow action or counters object in
 * @uflow_res, bumping the object's usecnt.  Capacity was fixed at
 * flow_resources_alloc() time; exceeding it (or passing an unknown
 * spec type) triggers a WARN.  References are dropped again by
 * ib_uverbs_flow_resources_free().
 */
void flow_resources_add(struct ib_uflow_resources *uflow_res,
			enum ib_flow_spec_type type,
			void *ibobj)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	switch (type) {
	case IB_FLOW_SPEC_ACTION_HANDLE:
		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
		uflow_res->collection[uflow_res->collection_num++] =
			(struct ib_flow_action *)ibobj;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
		uflow_res->counters[uflow_res->counters_num++] =
			(struct ib_counters *)ibobj;
		break;
	default:
		WARN_ON(1);
	}

	uflow_res->num++;
}
EXPORT_SYMBOL(flow_resources_add);
9b828441 2727
8313c10f 2728static int kern_spec_to_ib_spec_action(const struct uverbs_attr_bundle *attrs,
9b828441
MB
2729 struct ib_uverbs_flow_spec *kern_spec,
2730 union ib_flow_spec *ib_spec,
2731 struct ib_uflow_resources *uflow_res)
94e03f11
MR
2732{
2733 ib_spec->type = kern_spec->type;
2734 switch (ib_spec->type) {
2735 case IB_FLOW_SPEC_ACTION_TAG:
2736 if (kern_spec->flow_tag.size !=
2737 sizeof(struct ib_uverbs_flow_spec_action_tag))
2738 return -EINVAL;
2739
2740 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2741 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2742 break;
483a3966
SS
2743 case IB_FLOW_SPEC_ACTION_DROP:
2744 if (kern_spec->drop.size !=
2745 sizeof(struct ib_uverbs_flow_spec_action_drop))
2746 return -EINVAL;
2747
2748 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2749 break;
9b828441
MB
2750 case IB_FLOW_SPEC_ACTION_HANDLE:
2751 if (kern_spec->action.size !=
2752 sizeof(struct ib_uverbs_flow_spec_action_handle))
2753 return -EOPNOTSUPP;
2754 ib_spec->action.act = uobj_get_obj_read(flow_action,
2755 UVERBS_OBJECT_FLOW_ACTION,
2756 kern_spec->action.handle,
8313c10f 2757 attrs);
9b828441
MB
2758 if (!ib_spec->action.act)
2759 return -EINVAL;
2760 ib_spec->action.size =
2761 sizeof(struct ib_flow_spec_action_handle);
b6ba4a9a
RS
2762 flow_resources_add(uflow_res,
2763 IB_FLOW_SPEC_ACTION_HANDLE,
2764 ib_spec->action.act);
9b828441
MB
2765 uobj_put_obj_read(ib_spec->action.act);
2766 break;
b6ba4a9a
RS
2767 case IB_FLOW_SPEC_ACTION_COUNT:
2768 if (kern_spec->flow_count.size !=
2769 sizeof(struct ib_uverbs_flow_spec_action_count))
2770 return -EINVAL;
2771 ib_spec->flow_count.counters =
2772 uobj_get_obj_read(counters,
2773 UVERBS_OBJECT_COUNTERS,
2774 kern_spec->flow_count.handle,
8313c10f 2775 attrs);
b6ba4a9a
RS
2776 if (!ib_spec->flow_count.counters)
2777 return -EINVAL;
2778 ib_spec->flow_count.size =
2779 sizeof(struct ib_flow_spec_action_count);
2780 flow_resources_add(uflow_res,
2781 IB_FLOW_SPEC_ACTION_COUNT,
2782 ib_spec->flow_count.counters);
2783 uobj_put_obj_read(ib_spec->flow_count.counters);
2784 break;
94e03f11
MR
2785 default:
2786 return -EINVAL;
2787 }
2788 return 0;
2789}
2790
766d8551 2791static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
15dfbd6b
MG
2792{
2793 /* Returns user space filter size, includes padding */
2794 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2795}
2796
766d8551 2797static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
15dfbd6b
MG
2798 u16 ib_real_filter_sz)
2799{
2800 /*
2801 * User space filter structures must be 64 bit aligned, otherwise this
2802 * may pass, but we won't handle additional new attributes.
2803 */
2804
2805 if (kern_filter_size > ib_real_filter_sz) {
2806 if (memchr_inv(kern_spec_filter +
2807 ib_real_filter_sz, 0,
2808 kern_filter_size - ib_real_filter_sz))
2809 return -EINVAL;
2810 return ib_real_filter_sz;
2811 }
2812 return kern_filter_size;
2813}
2814
766d8551
MB
/*
 * Validate and copy a user-space flow filter spec (value + mask blobs)
 * into the kernel union ib_flow_spec.  @kern_filter_sz is the size of
 * each blob; bytes beyond the kernel's known filter structure must be
 * zero (enforced by spec_filter_size()).  Per-protocol field ranges
 * (IPv6 flow label, VXLAN tunnel id) are checked after the copy.
 */
int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	/* VXLAN tunnel specs describe the outer headers; INNER is invalid. */
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		/* IPv6 flow label is a 20-bit field. */
		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		/* VXLAN VNI is a 24-bit field. */
		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_GRE:
		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_MPLS:
		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2936
766d8551
MB
2937static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2938 union ib_flow_spec *ib_spec)
2939{
2940 ssize_t kern_filter_sz;
2941 void *kern_spec_mask;
2942 void *kern_spec_val;
2943
766d8551
MB
2944 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2945
2946 kern_spec_val = (void *)kern_spec +
2947 sizeof(struct ib_uverbs_flow_spec_hdr);
2948 kern_spec_mask = kern_spec_val + kern_filter_sz;
2949
2950 return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
2951 kern_spec_mask,
2952 kern_spec_val,
2953 kern_filter_sz, ib_spec);
2954}
2955
8313c10f 2956static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
9b828441
MB
2957 struct ib_uverbs_flow_spec *kern_spec,
2958 union ib_flow_spec *ib_spec,
2959 struct ib_uflow_resources *uflow_res)
94e03f11
MR
2960{
2961 if (kern_spec->reserved)
2962 return -EINVAL;
2963
2964 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
8313c10f 2965 return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
9b828441 2966 uflow_res);
94e03f11
MR
2967 else
2968 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2969}
2970
8313c10f 2971static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs,
ef87df2c 2972 struct ib_udata *ucore)
f213c052
YH
2973{
2974 struct ib_uverbs_ex_create_wq cmd = {};
2975 struct ib_uverbs_ex_create_wq_resp resp = {};
2976 struct ib_uwq_object *obj;
2977 int err = 0;
2978 struct ib_cq *cq;
2979 struct ib_pd *pd;
2980 struct ib_wq *wq;
2981 struct ib_wq_init_attr wq_init_attr = {};
2982 size_t required_cmd_sz;
2983 size_t required_resp_len;
bbd51e88 2984 struct ib_device *ib_dev;
f213c052
YH
2985
2986 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2987 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2988
2989 if (ucore->inlen < required_cmd_sz)
2990 return -EINVAL;
2991
2992 if (ucore->outlen < required_resp_len)
2993 return -ENOSPC;
2994
2995 if (ucore->inlen > sizeof(cmd) &&
2996 !ib_is_udata_cleared(ucore, sizeof(cmd),
2997 ucore->inlen - sizeof(cmd)))
2998 return -EOPNOTSUPP;
2999
3000 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3001 if (err)
3002 return err;
3003
3004 if (cmd.comp_mask)
3005 return -EOPNOTSUPP;
3006
8313c10f 3007 obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
bbd51e88 3008 &ib_dev);
fd3c7904
MB
3009 if (IS_ERR(obj))
3010 return PTR_ERR(obj);
f213c052 3011
8313c10f 3012 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
f213c052
YH
3013 if (!pd) {
3014 err = -EINVAL;
3015 goto err_uobj;
3016 }
3017
8313c10f 3018 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
f213c052
YH
3019 if (!cq) {
3020 err = -EINVAL;
3021 goto err_put_pd;
3022 }
3023
3024 wq_init_attr.cq = cq;
3025 wq_init_attr.max_sge = cmd.max_sge;
3026 wq_init_attr.max_wr = cmd.max_wr;
8313c10f 3027 wq_init_attr.wq_context = attrs->ufile;
f213c052
YH
3028 wq_init_attr.wq_type = cmd.wq_type;
3029 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
af1cb95d
NO
3030 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
3031 sizeof(cmd.create_flags)))
3032 wq_init_attr.create_flags = cmd.create_flags;
f213c052
YH
3033 obj->uevent.events_reported = 0;
3034 INIT_LIST_HEAD(&obj->uevent.event_list);
21885586 3035
ef87df2c 3036 wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
f213c052
YH
3037 if (IS_ERR(wq)) {
3038 err = PTR_ERR(wq);
3039 goto err_put_cq;
3040 }
3041
3042 wq->uobject = &obj->uevent.uobject;
3043 obj->uevent.uobject.object = wq;
3044 wq->wq_type = wq_init_attr.wq_type;
3045 wq->cq = cq;
3046 wq->pd = pd;
3047 wq->device = pd->device;
3048 wq->wq_context = wq_init_attr.wq_context;
3049 atomic_set(&wq->usecnt, 0);
3050 atomic_inc(&pd->usecnt);
3051 atomic_inc(&cq->usecnt);
3052 wq->uobject = &obj->uevent.uobject;
3053 obj->uevent.uobject.object = wq;
f213c052
YH
3054
3055 memset(&resp, 0, sizeof(resp));
3056 resp.wq_handle = obj->uevent.uobject.id;
3057 resp.max_sge = wq_init_attr.max_sge;
3058 resp.max_wr = wq_init_attr.max_wr;
3059 resp.wqn = wq->wq_num;
3060 resp.response_length = required_resp_len;
3061 err = ib_copy_to_udata(ucore,
3062 &resp, resp.response_length);
3063 if (err)
3064 goto err_copy;
3065
fd3c7904
MB
3066 uobj_put_obj_read(pd);
3067 uobj_put_obj_read(cq);
7106a976 3068 return uobj_alloc_commit(&obj->uevent.uobject);
f213c052
YH
3069
3070err_copy:
f213c052
YH
3071 ib_destroy_wq(wq);
3072err_put_cq:
fd3c7904 3073 uobj_put_obj_read(cq);
f213c052 3074err_put_pd:
fd3c7904 3075 uobj_put_obj_read(pd);
f213c052 3076err_uobj:
fd3c7904 3077 uobj_alloc_abort(&obj->uevent.uobject);
f213c052
YH
3078
3079 return err;
3080}
3081
8313c10f 3082static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs,
ef87df2c 3083 struct ib_udata *ucore)
f213c052
YH
3084{
3085 struct ib_uverbs_ex_destroy_wq cmd = {};
3086 struct ib_uverbs_ex_destroy_wq_resp resp = {};
f213c052
YH
3087 struct ib_uobject *uobj;
3088 struct ib_uwq_object *obj;
3089 size_t required_cmd_sz;
3090 size_t required_resp_len;
3091 int ret;
3092
3093 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
3094 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3095
3096 if (ucore->inlen < required_cmd_sz)
3097 return -EINVAL;
3098
3099 if (ucore->outlen < required_resp_len)
3100 return -ENOSPC;
3101
3102 if (ucore->inlen > sizeof(cmd) &&
3103 !ib_is_udata_cleared(ucore, sizeof(cmd),
3104 ucore->inlen - sizeof(cmd)))
3105 return -EOPNOTSUPP;
3106
3107 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3108 if (ret)
3109 return ret;
3110
3111 if (cmd.comp_mask)
3112 return -EOPNOTSUPP;
3113
3114 resp.response_length = required_resp_len;
8313c10f 3115 uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
fd3c7904
MB
3116 if (IS_ERR(uobj))
3117 return PTR_ERR(uobj);
f213c052 3118
f213c052 3119 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
f213c052 3120 resp.events_reported = obj->uevent.events_reported;
32ed5c00
JG
3121
3122 uobj_put_destroy(uobj);
f213c052 3123
c52d8114 3124 return ib_copy_to_udata(ucore, &resp, resp.response_length);
f213c052
YH
3125}
3126
8313c10f 3127static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs,
ef87df2c 3128 struct ib_udata *ucore)
f213c052
YH
3129{
3130 struct ib_uverbs_ex_modify_wq cmd = {};
3131 struct ib_wq *wq;
3132 struct ib_wq_attr wq_attr = {};
3133 size_t required_cmd_sz;
3134 int ret;
3135
3136 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3137 if (ucore->inlen < required_cmd_sz)
3138 return -EINVAL;
3139
3140 if (ucore->inlen > sizeof(cmd) &&
3141 !ib_is_udata_cleared(ucore, sizeof(cmd),
3142 ucore->inlen - sizeof(cmd)))
3143 return -EOPNOTSUPP;
3144
3145 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3146 if (ret)
3147 return ret;
3148
3149 if (!cmd.attr_mask)
3150 return -EINVAL;
3151
af1cb95d 3152 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
f213c052
YH
3153 return -EINVAL;
3154
8313c10f 3155 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
f213c052
YH
3156 if (!wq)
3157 return -EINVAL;
3158
3159 wq_attr.curr_wq_state = cmd.curr_wq_state;
3160 wq_attr.wq_state = cmd.wq_state;
af1cb95d
NO
3161 if (cmd.attr_mask & IB_WQ_FLAGS) {
3162 wq_attr.flags = cmd.flags;
3163 wq_attr.flags_mask = cmd.flags_mask;
3164 }
ef87df2c
JG
3165 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
3166 &attrs->driver_udata);
fd3c7904 3167 uobj_put_obj_read(wq);
f213c052
YH
3168 return ret;
3169}
3170
/*
 * IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL handler: create an RSS
 * receive-work-queue indirection table over 2^log_ind_tbl_size WQs.
 *
 * The command header is read first; the array of WQ handles follows it
 * in the input buffer.  Each handle is resolved to a WQ, the driver
 * creates the table, and every member WQ's usecnt is bumped.  On
 * success the @wqs array ownership transfers to the new table
 * (rwq_ind_tbl->ind_tbl), so only @wqs_handles is freed here.
 *
 * Returns 0 on success or a negative errno.
 */
static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs,
					     struct ib_udata *ucore)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
	struct ib_uobject *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq **wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq *wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;
	struct ib_device *ib_dev;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	/* Advance past the header; the WQ handle array follows. */
	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	/* Any bytes past the handle array must be zeroed by userspace. */
	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	/* Resolve every handle; a failure releases the ones read so far. */
	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
				       wqs_handles[num_read_wqs], attrs);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
						   &attrs->driver_udata);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	/* @wqs now belongs to the table; it is freed on table destroy. */
	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	/* The table holds a reference on every member WQ. */
	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	/* Drop the read locks taken by uobj_get_obj_read() above. */
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	return uobj_alloc_commit(uobj);

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}
3309
8313c10f 3310static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs,
ef87df2c 3311 struct ib_udata *ucore)
de019a94
YH
3312{
3313 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
de019a94 3314 int ret;
de019a94
YH
3315 size_t required_cmd_sz;
3316
3317 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3318
3319 if (ucore->inlen < required_cmd_sz)
3320 return -EINVAL;
3321
3322 if (ucore->inlen > sizeof(cmd) &&
3323 !ib_is_udata_cleared(ucore, sizeof(cmd),
3324 ucore->inlen - sizeof(cmd)))
3325 return -EOPNOTSUPP;
3326
3327 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3328 if (ret)
3329 return ret;
3330
3331 if (cmd.comp_mask)
3332 return -EOPNOTSUPP;
3333
c33e73af 3334 return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
7106a976 3335 cmd.ind_tbl_handle, attrs);
de019a94
YH
3336}
3337
/*
 * IB_USER_VERBS_EX_CMD_CREATE_FLOW handler: attach a flow steering rule
 * to a QP.  The fixed command header is followed in the input buffer by
 * a variable-length list of flow specs, each of which is validated and
 * converted into the kernel representation before the driver's
 * create_flow hook is invoked.
 *
 * Returns 0 on success or a negative errno.
 */
static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs,
				    struct ib_udata *ucore)
{
	struct ib_uverbs_create_flow cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject *uobj;
	struct ib_flow *flow_id;
	struct ib_uverbs_flow_attr *kern_flow_attr;
	struct ib_flow_attr *flow_attr;
	struct ib_qp *qp;
	struct ib_uflow_resources *uflow_res;
	struct ib_uverbs_flow_spec_hdr *kern_spec;
	int err = 0;
	void *ib_spec;
	int i;
	struct ib_device *ib_dev;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* Advance past the fixed header; the flow specs follow. */
	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	/* Steering rules can redirect traffic; require CAP_NET_RAW. */
	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	/* DONT_TRAP is not meaningful for the catch-all default rules. */
	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	/* Declared spec area must fit the input and be plausible. */
	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		/* Copy header + specs into one contiguous kernel buffer. */
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		*kern_flow_attr = cmd.flow_attr;
		err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		/* No specs: the header copied into @cmd is sufficient. */
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	/* Flow steering is only defined for UD and raw packet QPs. */
	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
		err = -EINVAL;
		goto err_put;
	}

	flow_attr = kzalloc(struct_size(flow_attr, flows,
				cmd.flow_attr.num_of_specs), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}
	uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
	if (!uflow_res) {
		err = -ENOMEM;
		goto err_free_flow_attr;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/*
	 * Walk the user specs, converting each one; both cursors advance
	 * by the per-spec size and cmd.flow_attr.size counts down the
	 * bytes still unconsumed.
	 */
	kern_spec = kern_flow_attr->flow_specs;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
			cmd.flow_attr.size >= sizeof(*kern_spec) &&
			cmd.flow_attr.size >= kern_spec->size;
	     i++) {
		err = kern_spec_to_ib_spec(
				attrs, (struct ib_uverbs_flow_spec *)kern_spec,
				ib_spec, uflow_res);
		if (err)
			goto err_free;

		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= kern_spec->size;
		kern_spec = ((void *)kern_spec) + kern_spec->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	/* Every declared byte and spec must have been consumed exactly. */
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}

	flow_id = qp->device->create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER,
					  &attrs->driver_udata);

	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}

	/* Bind the flow, QP reference and side resources to the uobject. */
	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return uobj_alloc_commit(uobj);
err_copy:
	/* Undo the QP usecnt bump taken by ib_set_flow() on success. */
	if (!qp->device->destroy_flow(flow_id))
		atomic_dec(&qp->usecnt);
err_free:
	ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	/* kern_flow_attr was only allocated when specs were present. */
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
3509
8313c10f 3510static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs,
ef87df2c 3511 struct ib_udata *ucore)
f21519b2 3512{
436f2ad0 3513 struct ib_uverbs_destroy_flow cmd;
436f2ad0
HHZ
3514 int ret;
3515
6bcca3d4
YD
3516 if (ucore->inlen < sizeof(cmd))
3517 return -EINVAL;
3518
f21519b2
YD
3519 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3520 if (ret)
3521 return ret;
436f2ad0 3522
2782c2d3
YD
3523 if (cmd.comp_mask)
3524 return -EINVAL;
3525
7106a976 3526 return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
436f2ad0
HHZ
3527}
3528
/*
 * Common worker for the SRQ and XSRQ create commands.
 *
 * Resolves the objects named in @cmd — the PD always, plus the XRCD for
 * IB_SRQT_XRC and the CQ for any type for which ib_srq_has_cq() is true
 * — creates the SRQ through the driver, takes the appropriate
 * references, and copies the response to the user address in
 * cmd->response.
 *
 * Returns the result of uobj_alloc_commit() on success, otherwise a
 * negative errno after unwinding all references taken so far.
 */
static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object *obj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr attr;
	int ret;
	struct ib_device *ib_dev;

	obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
						  &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
					  attrs);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			/*
			 * NOTE(review): this goto reaches err_put_xrcd,
			 * which decrements obj->uxrcd->refcnt before
			 * obj->uxrcd has been assigned below — presumably
			 * unreachable because a live uobject has a
			 * non-NULL ->object; verify.
			 */
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		/* Pin the XRCD for the lifetime of the SRQ. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
						cmd->cq_handle, attrs);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = attrs->ufile;
	attr.srq_type = cmd->srq_type;
	attr.attr.max_wr = cmd->max_wr;
	attr.attr.max_sge = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->srq_type = cmd->srq_type;
	srq->uobject = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;

	/* The SRQ holds a reference on its CQ (if the type has one) ... */
	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	/* ... and on its XRCD for XRC SRQs ... */
	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	/* ... and always on its PD. */
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr = attr.attr.max_wr;
	resp.max_sge = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Drop the read locks; the persistent refcounts stay. */
	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	return uobj_alloc_commit(&obj->uevent.uobject);

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
3664
7106a976
JG
3665static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs,
3666 const char __user *buf, int in_len, int out_len)
8541f8de
SH
3667{
3668 struct ib_uverbs_create_srq cmd;
3669 struct ib_uverbs_create_xsrq xcmd;
3670 struct ib_uverbs_create_srq_resp resp;
8541f8de
SH
3671
3672 if (out_len < sizeof resp)
3673 return -ENOSPC;
3674
3675 if (copy_from_user(&cmd, buf, sizeof cmd))
3676 return -EFAULT;
3677
38eb44fa 3678 memset(&xcmd, 0, sizeof(xcmd));
8541f8de
SH
3679 xcmd.response = cmd.response;
3680 xcmd.user_handle = cmd.user_handle;
3681 xcmd.srq_type = IB_SRQT_BASIC;
3682 xcmd.pd_handle = cmd.pd_handle;
3683 xcmd.max_wr = cmd.max_wr;
3684 xcmd.max_sge = cmd.max_sge;
3685 xcmd.srq_limit = cmd.srq_limit;
3686
ef87df2c 3687 return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
8541f8de
SH
3688}
3689
7106a976
JG
3690static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3691 const char __user *buf, int in_len,
3692 int out_len)
8541f8de
SH
3693{
3694 struct ib_uverbs_create_xsrq cmd;
3695 struct ib_uverbs_create_srq_resp resp;
8541f8de
SH
3696
3697 if (out_len < sizeof resp)
3698 return -ENOSPC;
3699
3700 if (copy_from_user(&cmd, buf, sizeof cmd))
3701 return -EFAULT;
3702
ef87df2c 3703 return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
8541f8de
SH
3704}
3705
7106a976
JG
3706static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs,
3707 const char __user *buf, int in_len, int out_len)
f520ba5a
RD
3708{
3709 struct ib_uverbs_modify_srq cmd;
3710 struct ib_srq *srq;
3711 struct ib_srq_attr attr;
3712 int ret;
3713
3714 if (copy_from_user(&cmd, buf, sizeof cmd))
3715 return -EFAULT;
3716
8313c10f 3717 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
9ead190b
RD
3718 if (!srq)
3719 return -EINVAL;
f520ba5a
RD
3720
3721 attr.max_wr = cmd.max_wr;
f520ba5a
RD
3722 attr.srq_limit = cmd.srq_limit;
3723
ef87df2c
JG
3724 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
3725 &attrs->driver_udata);
f520ba5a 3726
fd3c7904 3727 uobj_put_obj_read(srq);
f520ba5a 3728
7106a976 3729 return ret;
f520ba5a
RD
3730}
3731
7106a976
JG
3732static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs,
3733 const char __user *buf, int in_len, int out_len)
8bdb0e86
DB
3734{
3735 struct ib_uverbs_query_srq cmd;
3736 struct ib_uverbs_query_srq_resp resp;
3737 struct ib_srq_attr attr;
3738 struct ib_srq *srq;
3739 int ret;
3740
3741 if (out_len < sizeof resp)
3742 return -ENOSPC;
3743
3744 if (copy_from_user(&cmd, buf, sizeof cmd))
3745 return -EFAULT;
3746
8313c10f 3747 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
9ead190b
RD
3748 if (!srq)
3749 return -EINVAL;
8bdb0e86 3750
9ead190b 3751 ret = ib_query_srq(srq, &attr);
8bdb0e86 3752
fd3c7904 3753 uobj_put_obj_read(srq);
8bdb0e86
DB
3754
3755 if (ret)
9ead190b 3756 return ret;
8bdb0e86
DB
3757
3758 memset(&resp, 0, sizeof resp);
3759
3760 resp.max_wr = attr.max_wr;
3761 resp.max_sge = attr.max_sge;
3762 resp.srq_limit = attr.srq_limit;
3763
40a20339 3764 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
9ead190b 3765 return -EFAULT;
8bdb0e86 3766
7106a976 3767 return 0;
8bdb0e86
DB
3768}
3769
7106a976
JG
3770static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs,
3771 const char __user *buf, int in_len,
3772 int out_len)
f520ba5a 3773{
63aaf647
RD
3774 struct ib_uverbs_destroy_srq cmd;
3775 struct ib_uverbs_destroy_srq_resp resp;
9ead190b 3776 struct ib_uobject *uobj;
9ead190b 3777 struct ib_uevent_object *obj;
f520ba5a
RD
3778
3779 if (copy_from_user(&cmd, buf, sizeof cmd))
3780 return -EFAULT;
3781
8313c10f 3782 uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
fd3c7904
MB
3783 if (IS_ERR(uobj))
3784 return PTR_ERR(uobj);
3785
9ead190b 3786 obj = container_of(uobj, struct ib_uevent_object, uobject);
fd3c7904 3787 memset(&resp, 0, sizeof(resp));
9ead190b 3788 resp.events_reported = obj->events_reported;
32ed5c00
JG
3789
3790 uobj_put_destroy(uobj);
3791
40a20339 3792 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
fd3c7904 3793 return -EFAULT;
63aaf647 3794
7106a976 3795 return 0;
f520ba5a 3796}
02d1aa7a 3797
/*
 * IB_USER_VERBS_EX_CMD_QUERY_DEVICE handler: extensible device query.
 *
 * The response is built up field group by field group; each optional
 * group is filled in (and counted into resp.response_length) only if
 * the user's output buffer is large enough to hold it.  This keeps the
 * command forward and backward compatible with older userspace — do
 * not reorder the groups, the accumulation order is ABI.
 *
 * Returns 0 on success or a negative errno.
 */
static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs,
				     struct ib_udata *ucore)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device cmd;
	struct ib_device_attr attr = {0};
	struct ib_ucontext *ucontext;
	struct ib_device *ib_dev;
	int err;

	ucontext = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ucontext))
		return PTR_ERR(ucontext);
	ib_dev = ucontext->device;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* The base response (up to odp_caps) is mandatory. */
	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
	if (err)
		return err;

	copy_query_dev_fields(ucontext, &resp.base, &attr);

	/* Optional groups: stop at the first one the buffer cannot hold. */
	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	/* Counted even without ODP support; the zeroed caps are valid. */
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge = attr.tm_caps.max_sge;
	resp.tm_caps.flags = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size))
		goto end;

	resp.max_dm_size = attr.max_dm_size;
	resp.response_length += sizeof(resp.max_dm_size);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}
869ddcf8 3920
8313c10f 3921static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs,
ef87df2c 3922 struct ib_udata *ucore)
869ddcf8
YC
3923{
3924 struct ib_uverbs_ex_modify_cq cmd = {};
3925 struct ib_cq *cq;
3926 size_t required_cmd_sz;
3927 int ret;
3928
3929 required_cmd_sz = offsetof(typeof(cmd), reserved) +
3930 sizeof(cmd.reserved);
3931 if (ucore->inlen < required_cmd_sz)
3932 return -EINVAL;
3933
3934 /* sanity checks */
3935 if (ucore->inlen > sizeof(cmd) &&
3936 !ib_is_udata_cleared(ucore, sizeof(cmd),
3937 ucore->inlen - sizeof(cmd)))
3938 return -EOPNOTSUPP;
3939
3940 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3941 if (ret)
3942 return ret;
3943
3944 if (!cmd.attr_mask || cmd.reserved)
3945 return -EINVAL;
3946
3947 if (cmd.attr_mask > IB_CQ_MODERATE)
3948 return -EOPNOTSUPP;
3949
8313c10f 3950 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
869ddcf8
YC
3951 if (!cq)
3952 return -EINVAL;
3953
4190b4e9 3954 ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
869ddcf8
YC
3955
3956 uobj_put_obj_read(cq);
3957
3958 return ret;
3959}
d120c3c9 3960
/*
 * Describe the input structs for write(). Some write methods have an input
 * only struct, most have an input and output. If the struct has an output then
 * the 'response' u64 must be the first field in the request structure.
 *
 * If udata is present then both the request and response structs have a
 * trailing driver_data flex array. In this case the size of the base struct
 * cannot be changed.
 */
/* Offset just past _member inside _struct. */
#define offsetof_after(_struct, _member) \
	(offsetof(_struct, _member) + sizeof(((_struct *)NULL)->_member))

/* Request + response command; the BUILD_BUG_ON_ZERO terms enforce the
 * "response u64 comes first" layout rule at compile time. */
#define UAPI_DEF_WRITE_IO(req, resp) \
	.write.has_resp = 1 + \
			  BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
			  BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) != \
					    sizeof(u64)), \
	.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)

/* Input-only command. */
#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)

/* Request + response, each followed by a driver_data flex array; the
 * checks confirm driver_data sits exactly at the end of the base struct. */
#define UAPI_DEF_WRITE_UDATA_IO(req, resp) \
	UAPI_DEF_WRITE_IO(req, resp), \
	.write.has_udata = \
		1 + \
		BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
				  sizeof(req)) + \
		BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) != \
				  sizeof(resp))

/* Input-only command followed by a driver_data flex array. */
#define UAPI_DEF_WRITE_UDATA_I(req) \
	UAPI_DEF_WRITE_I(req), \
	.write.has_udata = \
		1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
				      sizeof(req))

/*
 * The _EX versions are for use with WRITE_EX and allow the last struct member
 * to be specified. Buffers that do not include that member will be rejected.
 */
#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member) \
	.write.has_resp = 1, \
	.write.req_size = offsetof_after(req, req_last_member), \
	.write.resp_size = offsetof_after(resp, resp_last_member)

#define UAPI_DEF_WRITE_I_EX(req, req_last_member) \
	.write.req_size = offsetof_after(req, req_last_member)
4008
d120c3c9 4009const struct uapi_definition uverbs_def_write_intf[] = {
a140692a
JG
4010 DECLARE_UVERBS_OBJECT(
4011 UVERBS_OBJECT_AH,
4012 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
4013 ib_uverbs_create_ah,
669dac1e
JG
4014 UAPI_DEF_WRITE_UDATA_IO(
4015 struct ib_uverbs_create_ah,
4016 struct ib_uverbs_create_ah_resp),
a140692a 4017 UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
669dac1e
JG
4018 DECLARE_UVERBS_WRITE(
4019 IB_USER_VERBS_CMD_DESTROY_AH,
4020 ib_uverbs_destroy_ah,
4021 UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
4022 UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),
d120c3c9
JG
4023
4024 DECLARE_UVERBS_OBJECT(
4025 UVERBS_OBJECT_COMP_CHANNEL,
669dac1e
JG
4026 DECLARE_UVERBS_WRITE(
4027 IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
4028 ib_uverbs_create_comp_channel,
4029 UAPI_DEF_WRITE_IO(
4030 struct ib_uverbs_create_comp_channel,
4031 struct ib_uverbs_create_comp_channel_resp))),
d120c3c9
JG
4032
4033 DECLARE_UVERBS_OBJECT(
4034 UVERBS_OBJECT_CQ,
4035 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
a140692a 4036 ib_uverbs_create_cq,
669dac1e
JG
4037 UAPI_DEF_WRITE_UDATA_IO(
4038 struct ib_uverbs_create_cq,
4039 struct ib_uverbs_create_cq_resp),
a140692a 4040 UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
669dac1e
JG
4041 DECLARE_UVERBS_WRITE(
4042 IB_USER_VERBS_CMD_DESTROY_CQ,
4043 ib_uverbs_destroy_cq,
4044 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
4045 struct ib_uverbs_destroy_cq_resp),
4046 UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
4047 DECLARE_UVERBS_WRITE(
4048 IB_USER_VERBS_CMD_POLL_CQ,
4049 ib_uverbs_poll_cq,
4050 UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
4051 struct ib_uverbs_poll_cq_resp),
4052 UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
4053 DECLARE_UVERBS_WRITE(
4054 IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
4055 ib_uverbs_req_notify_cq,
4056 UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
4057 UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
d120c3c9 4058 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
a140692a 4059 ib_uverbs_resize_cq,
669dac1e
JG
4060 UAPI_DEF_WRITE_UDATA_IO(
4061 struct ib_uverbs_resize_cq,
4062 struct ib_uverbs_resize_cq_resp),
a140692a 4063 UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
669dac1e
JG
4064 DECLARE_UVERBS_WRITE_EX(
4065 IB_USER_VERBS_EX_CMD_CREATE_CQ,
4066 ib_uverbs_ex_create_cq,
4067 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
4068 reserved,
4069 struct ib_uverbs_ex_create_cq_resp,
4070 response_length),
4071 UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
4072 DECLARE_UVERBS_WRITE_EX(
4073 IB_USER_VERBS_EX_CMD_MODIFY_CQ,
4074 ib_uverbs_ex_modify_cq,
4075 UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
4076 UAPI_DEF_METHOD_NEEDS_FN(create_cq))),
d120c3c9
JG
4077
4078 DECLARE_UVERBS_OBJECT(
4079 UVERBS_OBJECT_DEVICE,
4080 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
669dac1e
JG
4081 ib_uverbs_get_context,
4082 UAPI_DEF_WRITE_UDATA_IO(
4083 struct ib_uverbs_get_context,
4084 struct ib_uverbs_get_context_resp)),
4085 DECLARE_UVERBS_WRITE(
4086 IB_USER_VERBS_CMD_QUERY_DEVICE,
4087 ib_uverbs_query_device,
4088 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
4089 struct ib_uverbs_query_device_resp)),
4090 DECLARE_UVERBS_WRITE(
4091 IB_USER_VERBS_CMD_QUERY_PORT,
4092 ib_uverbs_query_port,
4093 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
4094 struct ib_uverbs_query_port_resp),
4095 UAPI_DEF_METHOD_NEEDS_FN(query_port)),
4096 DECLARE_UVERBS_WRITE_EX(
4097 IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
4098 ib_uverbs_ex_query_device,
4099 UAPI_DEF_WRITE_IO_EX(
4100 struct ib_uverbs_ex_query_device,
4101 reserved,
4102 struct ib_uverbs_ex_query_device_resp,
4103 response_length),
4104 UAPI_DEF_METHOD_NEEDS_FN(query_device)),
a140692a
JG
4105 UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
4106 UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
d120c3c9
JG
4107
4108 DECLARE_UVERBS_OBJECT(
4109 UVERBS_OBJECT_FLOW,
669dac1e
JG
4110 DECLARE_UVERBS_WRITE_EX(
4111 IB_USER_VERBS_EX_CMD_CREATE_FLOW,
4112 ib_uverbs_ex_create_flow,
4113 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
4114 flow_attr,
4115 struct ib_uverbs_create_flow_resp,
4116 flow_handle),
4117 UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
4118 DECLARE_UVERBS_WRITE_EX(
4119 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
4120 ib_uverbs_ex_destroy_flow,
4121 UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
4122 UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
a140692a
JG
4123
4124 DECLARE_UVERBS_OBJECT(
4125 UVERBS_OBJECT_MR,
4126 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
4127 ib_uverbs_dereg_mr,
669dac1e 4128 UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
a140692a 4129 UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
669dac1e
JG
4130 DECLARE_UVERBS_WRITE(
4131 IB_USER_VERBS_CMD_REG_MR,
4132 ib_uverbs_reg_mr,
4133 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
4134 struct ib_uverbs_reg_mr_resp),
4135 UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
4136 DECLARE_UVERBS_WRITE(
4137 IB_USER_VERBS_CMD_REREG_MR,
4138 ib_uverbs_rereg_mr,
4139 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
4140 struct ib_uverbs_rereg_mr_resp),
4141 UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
a140692a
JG
4142
4143 DECLARE_UVERBS_OBJECT(
4144 UVERBS_OBJECT_MW,
669dac1e
JG
4145 DECLARE_UVERBS_WRITE(
4146 IB_USER_VERBS_CMD_ALLOC_MW,
4147 ib_uverbs_alloc_mw,
4148 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
4149 struct ib_uverbs_alloc_mw_resp),
4150 UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
4151 DECLARE_UVERBS_WRITE(
4152 IB_USER_VERBS_CMD_DEALLOC_MW,
4153 ib_uverbs_dealloc_mw,
4154 UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
4155 UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
a140692a
JG
4156
4157 DECLARE_UVERBS_OBJECT(
4158 UVERBS_OBJECT_PD,
669dac1e
JG
4159 DECLARE_UVERBS_WRITE(
4160 IB_USER_VERBS_CMD_ALLOC_PD,
4161 ib_uverbs_alloc_pd,
4162 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
4163 struct ib_uverbs_alloc_pd_resp),
4164 UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
4165 DECLARE_UVERBS_WRITE(
4166 IB_USER_VERBS_CMD_DEALLOC_PD,
4167 ib_uverbs_dealloc_pd,
4168 UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
4169 UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
d120c3c9
JG
4170
4171 DECLARE_UVERBS_OBJECT(
4172 UVERBS_OBJECT_QP,
669dac1e
JG
4173 DECLARE_UVERBS_WRITE(
4174 IB_USER_VERBS_CMD_ATTACH_MCAST,
4175 ib_uverbs_attach_mcast,
4176 UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
4177 UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
4178 UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
d120c3c9 4179 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
a140692a 4180 ib_uverbs_create_qp,
669dac1e
JG
4181 UAPI_DEF_WRITE_UDATA_IO(
4182 struct ib_uverbs_create_qp,
4183 struct ib_uverbs_create_qp_resp),
a140692a 4184 UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
669dac1e
JG
4185 DECLARE_UVERBS_WRITE(
4186 IB_USER_VERBS_CMD_DESTROY_QP,
4187 ib_uverbs_destroy_qp,
4188 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
4189 struct ib_uverbs_destroy_qp_resp),
4190 UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
4191 DECLARE_UVERBS_WRITE(
4192 IB_USER_VERBS_CMD_DETACH_MCAST,
4193 ib_uverbs_detach_mcast,
4194 UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
4195 UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
4196 DECLARE_UVERBS_WRITE(
4197 IB_USER_VERBS_CMD_MODIFY_QP,
4198 ib_uverbs_modify_qp,
4199 UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
4200 UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
4201 DECLARE_UVERBS_WRITE(
4202 IB_USER_VERBS_CMD_POST_RECV,
4203 ib_uverbs_post_recv,
4204 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
4205 struct ib_uverbs_post_recv_resp),
4206 UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
4207 DECLARE_UVERBS_WRITE(
4208 IB_USER_VERBS_CMD_POST_SEND,
4209 ib_uverbs_post_send,
4210 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
4211 struct ib_uverbs_post_send_resp),
4212 UAPI_DEF_METHOD_NEEDS_FN(post_send)),
4213 DECLARE_UVERBS_WRITE(
4214 IB_USER_VERBS_CMD_QUERY_QP,
4215 ib_uverbs_query_qp,
4216 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
4217 struct ib_uverbs_query_qp_resp),
4218 UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
4219 DECLARE_UVERBS_WRITE_EX(
4220 IB_USER_VERBS_EX_CMD_CREATE_QP,
4221 ib_uverbs_ex_create_qp,
4222 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
4223 comp_mask,
4224 struct ib_uverbs_ex_create_qp_resp,
4225 response_length),
4226 UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
4227 DECLARE_UVERBS_WRITE_EX(
4228 IB_USER_VERBS_EX_CMD_MODIFY_QP,
4229 ib_uverbs_ex_modify_qp,
4230 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
4231 base,
4232 struct ib_uverbs_ex_modify_qp_resp,
4233 response_length),
4234 UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
d120c3c9
JG
4235
4236 DECLARE_UVERBS_OBJECT(
4237 UVERBS_OBJECT_RWQ_IND_TBL,
a140692a
JG
4238 DECLARE_UVERBS_WRITE_EX(
4239 IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
4240 ib_uverbs_ex_create_rwq_ind_table,
669dac1e
JG
4241 UAPI_DEF_WRITE_IO_EX(
4242 struct ib_uverbs_ex_create_rwq_ind_table,
4243 log_ind_tbl_size,
4244 struct ib_uverbs_ex_create_rwq_ind_table_resp,
4245 ind_tbl_num),
a140692a
JG
4246 UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
4247 DECLARE_UVERBS_WRITE_EX(
4248 IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
4249 ib_uverbs_ex_destroy_rwq_ind_table,
669dac1e
JG
4250 UAPI_DEF_WRITE_I(
4251 struct ib_uverbs_ex_destroy_rwq_ind_table),
a140692a 4252 UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
d120c3c9
JG
4253
4254 DECLARE_UVERBS_OBJECT(
4255 UVERBS_OBJECT_WQ,
669dac1e
JG
4256 DECLARE_UVERBS_WRITE_EX(
4257 IB_USER_VERBS_EX_CMD_CREATE_WQ,
4258 ib_uverbs_ex_create_wq,
4259 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
4260 max_sge,
4261 struct ib_uverbs_ex_create_wq_resp,
4262 wqn),
4263 UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
4264 DECLARE_UVERBS_WRITE_EX(
4265 IB_USER_VERBS_EX_CMD_DESTROY_WQ,
4266 ib_uverbs_ex_destroy_wq,
4267 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
4268 wq_handle,
4269 struct ib_uverbs_ex_destroy_wq_resp,
4270 reserved),
4271 UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
4272 DECLARE_UVERBS_WRITE_EX(
4273 IB_USER_VERBS_EX_CMD_MODIFY_WQ,
4274 ib_uverbs_ex_modify_wq,
4275 UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
4276 curr_wq_state),
4277 UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
d120c3c9
JG
4278
4279 DECLARE_UVERBS_OBJECT(
4280 UVERBS_OBJECT_SRQ,
4281 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
a140692a 4282 ib_uverbs_create_srq,
669dac1e
JG
4283 UAPI_DEF_WRITE_UDATA_IO(
4284 struct ib_uverbs_create_srq,
4285 struct ib_uverbs_create_srq_resp),
a140692a 4286 UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
d120c3c9 4287 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
a140692a 4288 ib_uverbs_create_xsrq,
669dac1e
JG
4289 UAPI_DEF_WRITE_UDATA_IO(
4290 struct ib_uverbs_create_xsrq,
4291 struct ib_uverbs_create_srq_resp),
a140692a 4292 UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
669dac1e
JG
4293 DECLARE_UVERBS_WRITE(
4294 IB_USER_VERBS_CMD_DESTROY_SRQ,
4295 ib_uverbs_destroy_srq,
4296 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
4297 struct ib_uverbs_destroy_srq_resp),
4298 UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
4299 DECLARE_UVERBS_WRITE(
4300 IB_USER_VERBS_CMD_MODIFY_SRQ,
4301 ib_uverbs_modify_srq,
4302 UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
4303 UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
4304 DECLARE_UVERBS_WRITE(
4305 IB_USER_VERBS_CMD_POST_SRQ_RECV,
4306 ib_uverbs_post_srq_recv,
4307 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
4308 struct ib_uverbs_post_srq_recv_resp),
4309 UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
4310 DECLARE_UVERBS_WRITE(
4311 IB_USER_VERBS_CMD_QUERY_SRQ,
4312 ib_uverbs_query_srq,
4313 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
4314 struct ib_uverbs_query_srq_resp),
4315 UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
a140692a
JG
4316
4317 DECLARE_UVERBS_OBJECT(
4318 UVERBS_OBJECT_XRCD,
669dac1e
JG
4319 DECLARE_UVERBS_WRITE(
4320 IB_USER_VERBS_CMD_CLOSE_XRCD,
4321 ib_uverbs_close_xrcd,
4322 UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
4323 UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
a140692a 4324 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
669dac1e
JG
4325 ib_uverbs_open_qp,
4326 UAPI_DEF_WRITE_UDATA_IO(
4327 struct ib_uverbs_open_qp,
4328 struct ib_uverbs_create_qp_resp)),
a140692a
JG
4329 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
4330 ib_uverbs_open_xrcd,
669dac1e
JG
4331 UAPI_DEF_WRITE_UDATA_IO(
4332 struct ib_uverbs_open_xrcd,
4333 struct ib_uverbs_open_xrcd_resp),
a140692a 4334 UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),
d120c3c9
JG
4335
4336 {},
4337};