// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct file *filp; /* Upon hot unplug we need a direct access to */
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_ib_dev *ib_dev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
		struct mlx5_core_cq core_cq;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}
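
/*
 * Illustrative only -- a sketch of how the uid from mlx5_ib_devx_create()
 * is meant to be used (the UCTX is a firmware object, so the returned uid
 * is simply the obj_id the firmware assigned to it):
 *
 *	int uid = mlx5_ib_devx_create(dev, true);
 *
 *	if (uid > 0)
 *		MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
 *	...
 *	mlx5_ib_devx_destroy(dev, uid);
 */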

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		return true;
	}

	return false;
}
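
/*
 * Both mlx5_ib_devx_is_flow_dest() and mlx5_ib_devx_is_flow_counter()
 * recover the object's identity from the pre-built destroy mailbox (dinbox)
 * rather than from dedicated type/id fields in struct devx_obj: the destroy
 * opcode uniquely implies what kind of object was created.
 */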

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
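
/*
 * Layout produced by get_enc_obj_id() (derived from the shifts above and
 * from the callers that pass "opcode | obj_type << 16"):
 *
 *	bits [31:0]  - object id as returned by the firmware
 *	bits [47:32] - opcode of the creator command
 *	bits [63:48] - obj_type, used with MLX5_CMD_OP_CREATE_GENERAL_OBJECT
 *
 * e.g. a CQ with cqn 0x15 is encoded as
 *	((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x15
 * get_dec_obj_id()/get_dec_obj_type() above invert this encoding.
 */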

static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}
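
/*
 * devx_get_obj_id() example: a MLX5_CMD_OP_QUERY_CQ mailbox carrying
 * cqn 0x15 maps to get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, 0x15), the same
 * value recorded at creation time, so the ownership check in
 * devx_is_valid_obj_id() below reduces to a single u64 comparison.
 */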

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
				&qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}
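
/*
 * The *_umem_valid bits set by devx_set_umem_valid() mark the user-provided
 * buffers of the object as being described by umem objects; presumably this
 * lets the firmware validate the referenced umems rather than trusting raw
 * addresses in a user-built mailbox.
 */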

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}
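
/*
 * devx_get_uid() resolution order: whitelisted (read-only) commands may run
 * with the context's own uid or, failing that, with the device-wide
 * whitelist uid; all other commands strictly require a uid on the context,
 * i.e. a ucontext that was created with DEVX support.
 */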

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match one of the devx_is_obj_create_cmd() cases */
		WARN_ON(true);
		break;
	}
}
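
/*
 * Example of what devx_obj_build_destroy_cmd() produces: for
 * MLX5_CMD_OP_CREATE_CQ the destroy inbox (din) is a plain
 * general_obj_in_cmd_hdr with opcode = MLX5_CMD_OP_DESTROY_CQ, the uid
 * copied from the create mailbox and the obj_id (cqn) taken from the
 * create output.  Only the cases above that override *dinlen need a
 * destroy inbox larger than the general header.
 */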

static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
}
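
/*
 * devx_handle_mkey_indirect(): KLM/KSM mkeys reference other mkeys, so the
 * new key is published in the device-wide mkey table where the rest of the
 * driver (e.g. the ODP fault paths) can resolve it by its base key.
 * devx_cleanup_mkey() removes it again; the required ordering for that
 * removal is spelled out in the comment above that function.
 */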

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_free_indirect_mkey(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
}

/*
 * The deletion from the mkey table needs to be done before destroying the
 * underlying mkey. Otherwise a race might occur in case another thread gets
 * the same mkey before this one is deleted; that thread would then fail when
 * inserting its own data into the table.
 *
 * Note:
 * An error in the destroy is not expected unless there is some other indirect
 * mkey which points to this one. In a kernel cleanup flow it will be just
 * destroyed in the iterative destruction call. In a user flow, in case
 * the application didn't close in the expected order it's its own problem,
 * the mkey won't be part of the table; in both cases the kernel is safe.
 */
static void devx_cleanup_mkey(struct devx_obj *obj)
{
	xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
		 mlx5_base_mkey(obj->devx_mr.mmkey.key));
}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
			  devx_free_indirect_mkey);
		return ret;
	}

	kfree(obj);
	return ret;
}
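
/*
 * Teardown ordering in devx_obj_cleanup(): the mkey is unpublished from the
 * mkey table before the firmware object is destroyed (see the comment above
 * devx_cleanup_mkey()), event subscriptions are detached under
 * event_xa_lock, and for indirect mkeys the final kfree() is deferred via
 * SRCU so concurrent readers of the mkey table drain first.
 */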

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}
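
/*
 * devx_cq_comp() walks the two-level xarray described at the top of this
 * file: level 1 is keyed by the event number (here MLX5_EVENT_TYPE_COMP),
 * level 2 by the object id (the cqn).  The walk runs under rcu_read_lock()
 * because subscriptions are unlinked with list_del_rcu() and freed via RCU
 * in devx_cleanup_subscription().
 */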

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto err_copy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);

	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	return 0;

err_copy:
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
		devx_cleanup_mkey(obj);
obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
1591
6bf8f22a
YH
1592struct devx_async_event_queue {
1593 spinlock_t lock;
1594 wait_queue_head_t poll_wait;
1595 struct list_head event_list;
a124edba 1596 atomic_t bytes_in_use;
eaebaf77 1597 u8 is_destroyed:1;
6bf8f22a
YH
1598};
1599
1600struct devx_async_cmd_event_file {
1601 struct ib_uobject uobj;
1602 struct devx_async_event_queue ev_queue;
a124edba 1603 struct mlx5_async_ctx async_ctx;
6bf8f22a
YH
1604};
1605
1606static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1607{
1608 spin_lock_init(&ev_queue->lock);
1609 INIT_LIST_HEAD(&ev_queue->event_list);
1610 init_waitqueue_head(&ev_queue->poll_wait);
a124edba 1611 atomic_set(&ev_queue->bytes_in_use, 0);
eaebaf77 1612 ev_queue->is_destroyed = 0;
6bf8f22a
YH
1613}
1614
1615static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1616 struct uverbs_attr_bundle *attrs)
1617{
1618 struct devx_async_cmd_event_file *ev_file;
1619
1620 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1621 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
e79c9c60 1622 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
6bf8f22a
YH
1623
1624 ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1625 uobj);
1626 devx_init_event_queue(&ev_file->ev_queue);
a124edba
YH
1627 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1628 return 0;
1629}
1630
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

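/*
 * Runs from the mlx5 command completion path: move the finished query onto
 * the FD's event queue, wake any poller/reader, and drop the file reference
 * that was taken when the async query was issued.
 */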
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

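/*
 * Issue a firmware query without blocking the caller: the output buffer is
 * allocated up front (bounded by MAX_ASYNC_BYTES_IN_USE per FD) and the
 * result is delivered through the async cmd FD once firmware completes.
 */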
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

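/*
 * Subscriptions live in a two-level xarray: level 1 is keyed by
 * event_type | (obj_type << 16), level 2 (affiliated events only) by the
 * object id. The helpers below allocate and release those entries.
 */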
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			/* don't leak the level-2 entry on insert failure */
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

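/*
 * Validate the requested event numbers against the firmware-advertised
 * user_affiliated/user_unaffiliated event bitmasks (an array of big-endian
 * 64-bit words); fall back to the legacy fixed lists when event_cap is
 * absent.
 */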
#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

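/*
 * Subscribe an event FD (optionally redirected to an eventfd) to a set of
 * event types, affiliated to a DEVX object when one is given. Allocation
 * and list insertion are split into two phases so the second phase cannot
 * fail.
 */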
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

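/*
 * Pin the user memory range and derive the MTT layout (page shift, page
 * count, page offset) that devx_umem_reg_cmd_build() encodes into the
 * CREATE_UMEM command.
 */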
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

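/*
 * Register a umem with firmware on behalf of the DEVX uid and return the
 * resulting object id to userspace; the destroy box is prebuilt so cleanup
 * can issue DESTROY_UMEM without reconstructing the command.
 */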
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

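/*
 * Event delivery path: classify the EQE (unaffiliated vs. affiliated),
 * recover the object id it refers to, and fan it out to every matching
 * subscription.
 */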
static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

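/*
 * Queue one event on a subscriber's FD. In OMIT_DATA mode the subscription
 * itself is the queue entry, so a duplicate pending event is coalesced;
 * otherwise the EQE is copied under GFP_ATOMIC, with allocation failure
 * reported as overflow on the next read.
 */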
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}

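/*
 * Notifier chained on the mlx5 EQ: look up subscribers under RCU using the
 * same event_type | (obj_type << 16) key that the subscribe path populated.
 */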
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}

void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}

static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};

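/*
 * read() on an event FD: in OMIT_DATA mode the queued entries are the
 * subscriptions themselves and only the 8-byte cookie is returned;
 * otherwise a full header-plus-EQE record is copied out and freed.
 */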
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;

	mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(ev_file->dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* read can't come any more */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&ev_file->dev->ib_dev.dev);
	return 0;
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};

static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}

static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
}

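/*
 * uverbs ioctl tables: each DECLARE_UVERBS_NAMED_METHOD below describes one
 * method's attribute layout, and the named objects then tie the methods to
 * their allocators and cleanup callbacks.
 */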
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

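/* DEVX is exposed only when firmware reports user-context support */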
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};