// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
        struct mlx5_core_dev *mdev;
        u32 obj_id;
        u32 dinlen; /* destroy inbox length */
        u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};

struct devx_umem {
        struct mlx5_core_dev *mdev;
        struct ib_umem *umem;
        u32 page_offset;
        int page_shift;
        int ncont;
        u32 dinlen;
        u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
        void *in;
        u32 inlen;
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
{
        return to_mucontext(ib_uverbs_get_ucontext(file));
}

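/*
 * Allocate a firmware UCTX object for this user context. The returned
 * object id is stored as the context's devx_uid and is later stamped into
 * every DEVX command issued on behalf of this context.
 */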
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
        u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
        u64 general_obj_types;
        void *hdr;
        int err;

        hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);

        general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
        if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
            !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
                return -EINVAL;

        if (!capable(CAP_NET_RAW))
                return -EPERM;

        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);

        err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        return 0;
}

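/* Destroy the firmware UCTX object that backs this context's devx_uid. */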
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
                          struct mlx5_ib_ucontext *context)
{
        u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);

        mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

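/*
 * Report whether a DEVX object can be used as a flow destination. The
 * object type is deduced from the opcode of its stored destroy command.
 */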
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
        struct devx_obj *devx_obj = obj;
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_DESTROY_TIR:
                *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
                                    obj_id);
                return true;

        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
                *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
                                    table_id);
                return true;
        default:
                return false;
        }
}

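/*
 * Verify that the object id carried in a modify/query command matches the
 * object bound to the supplied handle, so the command cannot be redirected
 * to a different object.
 */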
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
        u32 obj_id;

        switch (opcode) {
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
                obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
                break;
        case MLX5_CMD_OP_QUERY_MKEY:
                obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
                break;
        case MLX5_CMD_OP_QUERY_CQ:
                obj_id = MLX5_GET(query_cq_in, in, cqn);
                break;
        case MLX5_CMD_OP_MODIFY_CQ:
                obj_id = MLX5_GET(modify_cq_in, in, cqn);
                break;
        case MLX5_CMD_OP_QUERY_SQ:
                obj_id = MLX5_GET(query_sq_in, in, sqn);
                break;
        case MLX5_CMD_OP_MODIFY_SQ:
                obj_id = MLX5_GET(modify_sq_in, in, sqn);
                break;
        case MLX5_CMD_OP_QUERY_RQ:
                obj_id = MLX5_GET(query_rq_in, in, rqn);
                break;
        case MLX5_CMD_OP_MODIFY_RQ:
                obj_id = MLX5_GET(modify_rq_in, in, rqn);
                break;
        case MLX5_CMD_OP_QUERY_RMP:
                obj_id = MLX5_GET(query_rmp_in, in, rmpn);
                break;
        case MLX5_CMD_OP_MODIFY_RMP:
                obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
                break;
        case MLX5_CMD_OP_QUERY_RQT:
                obj_id = MLX5_GET(query_rqt_in, in, rqtn);
                break;
        case MLX5_CMD_OP_MODIFY_RQT:
                obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
                break;
        case MLX5_CMD_OP_QUERY_TIR:
                obj_id = MLX5_GET(query_tir_in, in, tirn);
                break;
        case MLX5_CMD_OP_MODIFY_TIR:
                obj_id = MLX5_GET(modify_tir_in, in, tirn);
                break;
        case MLX5_CMD_OP_QUERY_TIS:
                obj_id = MLX5_GET(query_tis_in, in, tisn);
                break;
        case MLX5_CMD_OP_MODIFY_TIS:
                obj_id = MLX5_GET(modify_tis_in, in, tisn);
                break;
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
                obj_id = MLX5_GET(query_flow_table_in, in, table_id);
                break;
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
                obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
                break;
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
                obj_id = MLX5_GET(query_flow_group_in, in, group_id);
                break;
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
                obj_id = MLX5_GET(query_fte_in, in, flow_index);
                break;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
                obj_id = MLX5_GET(set_fte_in, in, flow_index);
                break;
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
                obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
                break;
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
                obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
                break;
        case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
                obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
                break;
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
                obj_id = MLX5_GET(query_scheduling_element_in, in,
                                  scheduling_element_id);
                break;
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
                obj_id = MLX5_GET(modify_scheduling_element_in, in,
                                  scheduling_element_id);
                break;
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
                obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
                break;
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
                obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
                break;
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
                obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
                break;
        case MLX5_CMD_OP_QUERY_QP:
                obj_id = MLX5_GET(query_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_RST2INIT_QP:
                obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
                break;
        case MLX5_CMD_OP_2ERR_QP:
                obj_id = MLX5_GET(qp_2err_in, in, qpn);
                break;
        case MLX5_CMD_OP_2RST_QP:
                obj_id = MLX5_GET(qp_2rst_in, in, qpn);
                break;
        case MLX5_CMD_OP_QUERY_DCT:
                obj_id = MLX5_GET(query_dct_in, in, dctn);
                break;
        case MLX5_CMD_OP_QUERY_XRQ:
                obj_id = MLX5_GET(query_xrq_in, in, xrqn);
                break;
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
                obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
                break;
        case MLX5_CMD_OP_ARM_XRC_SRQ:
                obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
                break;
        case MLX5_CMD_OP_QUERY_SRQ:
                obj_id = MLX5_GET(query_srq_in, in, srqn);
                break;
        case MLX5_CMD_OP_ARM_RQ:
                obj_id = MLX5_GET(arm_rq_in, in, srq_number);
                break;
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
                obj_id = MLX5_GET(drain_dct_in, in, dctn);
                break;
        case MLX5_CMD_OP_ARM_XRQ:
                obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
                break;
        default:
                return false;
        }

        if (obj_id == obj->obj_id)
                return true;

        return false;
}

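/* Whitelist of command opcodes that create a firmware object. */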
static bool devx_is_obj_create_cmd(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_CREATE_XRQ:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_ALLOC_XRCD:
                return true;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        {
                u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

                if (op_mod == 0)
                        return true;
                return false;
        }
        default:
                return false;
        }
}

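/* Whitelist of command opcodes that modify an existing object. */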
static bool devx_is_obj_modify_cmd(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_ARM_XRQ:
                return true;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        {
                u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

                if (op_mod == 1)
                        return true;
                return false;
        }
        default:
                return false;
        }
}

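/* Whitelist of command opcodes that query an object. */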
static bool devx_is_obj_query_cmd(const void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_QUERY_RQT:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_QUERY_XRQ:
                return true;
        default:
                return false;
        }
}

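/* Whitelist of general HCA commands that are not tied to a specific object. */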
static bool devx_is_general_cmd(void *in)
{
        u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

        switch (opcode) {
        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_VNIC_ENV:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
                return true;
        default:
                return false;
        }
}

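/*
 * MLX5_IB_METHOD_DEVX_QUERY_EQN: translate a user supplied completion
 * vector number into the corresponding device EQ number.
 */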
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(struct ib_device *ib_dev,
                                                         struct ib_uverbs_file *file,
                                                         struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_dev *dev = to_mdev(ib_dev);
        int user_vector;
        int dev_eqn;
        unsigned int irqn;
        int err;

        if (uverbs_copy_from(&user_vector, attrs,
                             MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
                return -EFAULT;

        err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
        if (err < 0)
                return err;

        if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
                           &dev_eqn, sizeof(dev_eqn)))
                return -EFAULT;

        return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(struct ib_device *ib_dev,
                                                         struct ib_uverbs_file *file,
                                                         struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_dev *dev;
        u32 user_idx;
        s32 dev_idx;

        c = devx_ufile2uctx(file);
        if (IS_ERR(c))
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);

        if (uverbs_copy_from(&user_idx, attrs,
                             MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
                return -EFAULT;

        dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
        if (dev_idx < 0)
                return dev_idx;

        if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
                           &dev_idx, sizeof(dev_idx)))
                return -EFAULT;

        return 0;
}

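/*
 * MLX5_IB_METHOD_DEVX_OTHER: execute a whitelisted general HCA command on
 * behalf of the caller. The command is stamped with the context's devx_uid
 * before it is passed to the firmware.
 */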
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(struct ib_device *ib_dev,
                                                     struct ib_uverbs_file *file,
                                                     struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *c;
        struct mlx5_ib_dev *dev;
        void *cmd_in = uverbs_attr_get_alloced_ptr(
                attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
        int cmd_out_len = uverbs_attr_get_len(attrs,
                                              MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
        void *cmd_out;
        int err;

        c = devx_ufile2uctx(file);
        if (IS_ERR(c))
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);

        if (!c->devx_uid)
                return -EPERM;

        /* Only whitelisted general HCA commands are allowed for this method. */
        if (!devx_is_general_cmd(cmd_in))
                return -EINVAL;

        cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
        if (!cmd_out)
                return -ENOMEM;

        MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
        err = mlx5_cmd_exec(dev->mdev, cmd_in,
                            uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
                            cmd_out, cmd_out_len);
        if (err)
                goto other_cmd_free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len);

other_cmd_free:
        kvfree(cmd_out);
        return err;
}

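/*
 * Derive the destroy/deallocate command ("din") that matches a given create
 * command, and extract the new object's id, so the object can be torn down
 * when its uobject is released.
 */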
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
                                       u32 *dinlen,
                                       u32 *obj_id)
{
        u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
        u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

        *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
        *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

        MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
        MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

        switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
                MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
                break;

        case MLX5_CMD_OP_CREATE_MKEY:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
                break;
        case MLX5_CMD_OP_CREATE_CQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
                break;
        case MLX5_CMD_OP_ALLOC_PD:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
                break;
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
                break;
        case MLX5_CMD_OP_CREATE_RMP:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
                break;
        case MLX5_CMD_OP_CREATE_SQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
                break;
        case MLX5_CMD_OP_CREATE_RQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
                break;
        case MLX5_CMD_OP_CREATE_RQT:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
                break;
        case MLX5_CMD_OP_CREATE_TIR:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
                break;
        case MLX5_CMD_OP_CREATE_TIS:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
                break;
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DEALLOC_Q_COUNTER);
                break;
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
                *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
                *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
                MLX5_SET(destroy_flow_table_in, din, other_vport,
                         MLX5_GET(create_flow_table_in, in, other_vport));
                MLX5_SET(destroy_flow_table_in, din, vport_number,
                         MLX5_GET(create_flow_table_in, in, vport_number));
                MLX5_SET(destroy_flow_table_in, din, table_type,
                         MLX5_GET(create_flow_table_in, in, table_type));
                MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DESTROY_FLOW_TABLE);
                break;
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
                *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
                *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
                MLX5_SET(destroy_flow_group_in, din, other_vport,
                         MLX5_GET(create_flow_group_in, in, other_vport));
                MLX5_SET(destroy_flow_group_in, din, vport_number,
                         MLX5_GET(create_flow_group_in, in, vport_number));
                MLX5_SET(destroy_flow_group_in, din, table_type,
                         MLX5_GET(create_flow_group_in, in, table_type));
                MLX5_SET(destroy_flow_group_in, din, table_id,
                         MLX5_GET(create_flow_group_in, in, table_id));
                MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DESTROY_FLOW_GROUP);
                break;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
                *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
                *obj_id = MLX5_GET(set_fte_in, in, flow_index);
                MLX5_SET(delete_fte_in, din, other_vport,
                         MLX5_GET(set_fte_in, in, other_vport));
                MLX5_SET(delete_fte_in, din, vport_number,
                         MLX5_GET(set_fte_in, in, vport_number));
                MLX5_SET(delete_fte_in, din, table_type,
                         MLX5_GET(set_fte_in, in, table_type));
                MLX5_SET(delete_fte_in, din, table_id,
                         MLX5_GET(set_fte_in, in, table_id));
                MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
                break;
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
                break;
        case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
                break;
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
                break;
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
                *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
                *obj_id = MLX5_GET(create_scheduling_element_out, out,
                                   scheduling_element_id);
                MLX5_SET(destroy_scheduling_element_in, din,
                         scheduling_hierarchy,
                         MLX5_GET(create_scheduling_element_in, in,
                                  scheduling_hierarchy));
                MLX5_SET(destroy_scheduling_element_in, din,
                         scheduling_element_id, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
                break;
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
                *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
                *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
                MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
                break;
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
                *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
                *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
                MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
                break;
        case MLX5_CMD_OP_CREATE_QP:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
                break;
        case MLX5_CMD_OP_CREATE_SRQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
                break;
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
                         MLX5_CMD_OP_DESTROY_XRC_SRQ);
                break;
        case MLX5_CMD_OP_CREATE_DCT:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
                break;
        case MLX5_CMD_OP_CREATE_XRQ:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
                break;
        case MLX5_CMD_OP_ATTACH_TO_MCG:
                *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
                MLX5_SET(detach_from_mcg_in, din, qpn,
                         MLX5_GET(attach_to_mcg_in, in, qpn));
                memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
                       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
                       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
                break;
        case MLX5_CMD_OP_ALLOC_XRCD:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
                break;
        default:
                /* The entry must match one of the opcodes handled in devx_is_obj_create_cmd() */
                WARN_ON(true);
                break;
        }
}

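/* uobject cleanup callback: execute the destroy command recorded at creation time. */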
static int devx_obj_cleanup(struct ib_uobject *uobject,
                            enum rdma_remove_reason why)
{
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        struct devx_obj *obj = uobject->object;
        int ret;

        ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
        if (ib_is_destroy_retryable(ret, why, uobject))
                return ret;

        kfree(obj);
        return ret;
}

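/*
 * MLX5_IB_METHOD_DEVX_OBJ_CREATE: run a whitelisted create command, wrap the
 * resulting firmware object in a uobject and record the matching destroy
 * command for later cleanup.
 */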
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(struct ib_device *ib_dev,
                                                          struct ib_uverbs_file *file,
                                                          struct uverbs_attr_bundle *attrs)
{
        void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
        int cmd_out_len = uverbs_attr_get_len(attrs,
                                              MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
        void *cmd_out;
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
        struct devx_obj *obj;
        int err;

        if (!c->devx_uid)
                return -EPERM;

        if (!devx_is_obj_create_cmd(cmd_in))
                return -EINVAL;

        obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
        if (!cmd_out) {
                err = -ENOMEM;
                goto obj_free;
        }

        MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
        err = mlx5_cmd_exec(dev->mdev, cmd_in,
                            uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
                            cmd_out, cmd_out_len);
        if (err)
                goto cmd_free;

        uobj->object = obj;
        obj->mdev = dev->mdev;
        devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
        WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
                goto cmd_free;

        kvfree(cmd_out);
        return 0;

cmd_free:
        kvfree(cmd_out);
obj_free:
        kfree(obj);
        return err;
}

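/*
 * MLX5_IB_METHOD_DEVX_OBJ_MODIFY: run a whitelisted modify command after
 * checking that it really targets the object bound to the given handle.
 */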
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(struct ib_device *ib_dev,
                                                          struct ib_uverbs_file *file,
                                                          struct uverbs_attr_bundle *attrs)
{
        void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
        int cmd_out_len = uverbs_attr_get_len(attrs,
                                              MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
        struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
                                                          MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct devx_obj *obj = uobj->object;
        void *cmd_out;
        int err;

        if (!c->devx_uid)
                return -EPERM;

        if (!devx_is_obj_modify_cmd(cmd_in))
                return -EINVAL;

        if (!devx_is_valid_obj_id(obj, cmd_in))
                return -EINVAL;

        cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
        if (!cmd_out)
                return -ENOMEM;

        MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
        err = mlx5_cmd_exec(obj->mdev, cmd_in,
                            uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
                            cmd_out, cmd_out_len);
        if (err)
                goto other_cmd_free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
                             cmd_out, cmd_out_len);

other_cmd_free:
        kvfree(cmd_out);
        return err;
}

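/* MLX5_IB_METHOD_DEVX_OBJ_QUERY: same flow as OBJ_MODIFY, but for whitelisted query commands. */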
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(struct ib_device *ib_dev,
                                                         struct ib_uverbs_file *file,
                                                         struct uverbs_attr_bundle *attrs)
{
        void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
        int cmd_out_len = uverbs_attr_get_len(attrs,
                                              MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
        struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
                                                          MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct devx_obj *obj = uobj->object;
        void *cmd_out;
        int err;

        if (!c->devx_uid)
                return -EPERM;

        if (!devx_is_obj_query_cmd(cmd_in))
                return -EINVAL;

        if (!devx_is_valid_obj_id(obj, cmd_in))
                return -EINVAL;

        cmd_out = kvzalloc(cmd_out_len, GFP_KERNEL);
        if (!cmd_out)
                return -ENOMEM;

        MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
        err = mlx5_cmd_exec(obj->mdev, cmd_in,
                            uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
                            cmd_out, cmd_out_len);
        if (err)
                goto other_cmd_free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, cmd_out, cmd_out_len);

other_cmd_free:
        kvfree(cmd_out);
        return err;
}

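/*
 * Copy the user address, length and access flags from the method attributes,
 * pin the memory with ib_umem_get() and compute its page layout (page shift,
 * page count and offset).
 */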
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
                         struct uverbs_attr_bundle *attrs,
                         struct devx_umem *obj)
{
        u64 addr;
        size_t size;
        u32 access;
        int npages;
        int err;
        u32 page_mask;

        if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
            uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
                return -EFAULT;

        err = uverbs_get_flags32(&access, attrs,
                                 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
                                 IB_ACCESS_SUPPORTED);
        if (err)
                return err;

        err = ib_check_mr_access(access);
        if (err)
                return err;

        obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
        if (IS_ERR(obj->umem))
                return PTR_ERR(obj->umem);

        mlx5_ib_cont_pages(obj->umem, obj->umem->address,
                           MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
                           &obj->page_shift, &obj->ncont, NULL);

        if (!npages) {
                ib_umem_release(obj->umem);
                return -EINVAL;
        }

        page_mask = (1 << obj->page_shift) - 1;
        obj->page_offset = obj->umem->address & page_mask;

        return 0;
}

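/* Allocate the UMEM registration mailbox, sized for one MTT entry per page. */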
static int devx_umem_reg_cmd_alloc(struct devx_umem *obj,
                                   struct devx_umem_reg_cmd *cmd)
{
        cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
                (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
        cmd->in = kvzalloc(cmd->inlen, GFP_KERNEL);
        return cmd->in ? 0 : -ENOMEM;
}

static void devx_umem_reg_cmd_free(struct devx_umem_reg_cmd *cmd)
{
        kvfree(cmd->in);
}

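/* Fill the UMEM creation mailbox: general object header, page size/offset and MTT array. */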
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
                                    struct devx_umem *obj,
                                    struct devx_umem_reg_cmd *cmd)
{
        void *umem;
        __be64 *mtt;

        umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
        mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

        MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
        MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
        MLX5_SET(umem, umem, log_page_size, obj->page_shift -
                                             MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(umem, umem, page_offset, obj->page_offset);
        mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
                             (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
                             MLX5_IB_MTT_READ);
}

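/*
 * MLX5_IB_METHOD_DEVX_UMEM_REG: pin user memory and register it with the
 * firmware as a UMEM general object; the resulting object id is returned to
 * the caller.
 */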
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(struct ib_device *ib_dev,
                                                        struct ib_uverbs_file *file,
                                                        struct uverbs_attr_bundle *attrs)
{
        struct devx_umem_reg_cmd cmd;
        struct devx_umem *obj;
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
        u32 obj_id;
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
        int err;

        if (!c->devx_uid)
                return -EPERM;

        obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
        if (err)
                goto err_obj_free;

        err = devx_umem_reg_cmd_alloc(obj, &cmd);
        if (err)
                goto err_umem_release;

        devx_umem_reg_cmd_build(dev, obj, &cmd);

        MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
        err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
                            sizeof(cmd.out));
        if (err)
                goto err_umem_reg_cmd_free;

        obj->mdev = dev->mdev;
        uobj->object = obj;
        devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
        if (err)
                goto err_umem_destroy;

        devx_umem_reg_cmd_free(&cmd);

        return 0;

err_umem_destroy:
        mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_reg_cmd_free:
        devx_umem_reg_cmd_free(&cmd);
err_umem_release:
        ib_umem_release(obj->umem);
err_obj_free:
        kfree(obj);
        return err;
}

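/* uobject cleanup callback: destroy the firmware UMEM object and release the pinned memory. */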
static int devx_umem_cleanup(struct ib_uobject *uobject,
                             enum rdma_remove_reason why)
{
        struct devx_umem *obj = uobject->object;
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        int err;

        err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
        if (ib_is_destroy_retryable(err, why, uobject))
                return err;

        ib_umem_release(obj->umem);
        kfree(obj);
        return 0;
}

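/*
 * uverbs method and object declarations that expose the DEVX interface
 * (general commands, DEVX objects and UMEM registration) to user space.
 */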
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_UMEM_REG,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
                        MLX5_IB_OBJECT_DEVX_UMEM,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
                           UVERBS_ATTR_TYPE(u64),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
                           UVERBS_ATTR_TYPE(u64),
                           UA_MANDATORY),
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
                             enum ib_access_flags),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
                            UVERBS_ATTR_TYPE(u32),
                            UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MLX5_IB_METHOD_DEVX_UMEM_DEREG,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
                        MLX5_IB_OBJECT_DEVX_UMEM,
                        UVERBS_ACCESS_DESTROY,
                        UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_QUERY_EQN,
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
                           UVERBS_ATTR_TYPE(u32),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
                            UVERBS_ATTR_TYPE(u32),
                            UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_QUERY_UAR,
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
                           UVERBS_ATTR_TYPE(u32),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
                            UVERBS_ATTR_TYPE(u32),
                            UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_OTHER,
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_PTR_OUT(
                MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
                UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_OBJ_CREATE,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
                        MLX5_IB_OBJECT_DEVX_OBJ,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_PTR_OUT(
                MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
                UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
                        MLX5_IB_OBJECT_DEVX_OBJ,
                        UVERBS_ACCESS_DESTROY,
                        UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
                        MLX5_IB_OBJECT_DEVX_OBJ,
                        UVERBS_ACCESS_WRITE,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_PTR_OUT(
                MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
                UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DEVX_OBJ_QUERY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
                        MLX5_IB_OBJECT_DEVX_OBJ,
                        UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_PTR_OUT(
                MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
                UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
                UA_MANDATORY));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
                              &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
                              &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
                              &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
                            UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
                            UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
                            &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_OBJECT_TREE(devx_objects,
                           &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
                           &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
                           &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));

const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
        return &devx_objects;
}