/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
40 #include "mlx5_core.h"
43 static struct mlx5_core_rsc_common
*
44 mlx5_get_rsc(struct mlx5_qp_table
*table
, u32 rsn
)
46 struct mlx5_core_rsc_common
*common
;
48 spin_lock(&table
->lock
);
50 common
= radix_tree_lookup(&table
->tree
, rsn
);
52 atomic_inc(&common
->refcount
);
54 spin_unlock(&table
->lock
);
59 void mlx5_core_put_rsc(struct mlx5_core_rsc_common
*common
)
61 if (atomic_dec_and_test(&common
->refcount
))
62 complete(&common
->free
);
65 static u64
qp_allowed_event_types(void)
69 mask
= BIT(MLX5_EVENT_TYPE_PATH_MIG
) |
70 BIT(MLX5_EVENT_TYPE_COMM_EST
) |
71 BIT(MLX5_EVENT_TYPE_SQ_DRAINED
) |
72 BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE
) |
73 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
) |
74 BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED
) |
75 BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
) |
76 BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
);
81 static u64
rq_allowed_event_types(void)
85 mask
= BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE
) |
86 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
);
91 static u64
sq_allowed_event_types(void)
93 return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
);
96 static u64
dct_allowed_event_types(void)
98 return BIT(MLX5_EVENT_TYPE_DCT_DRAINED
);
101 static bool is_event_type_allowed(int rsc_type
, int event_type
)
104 case MLX5_EVENT_QUEUE_TYPE_QP
:
105 return BIT(event_type
) & qp_allowed_event_types();
106 case MLX5_EVENT_QUEUE_TYPE_RQ
:
107 return BIT(event_type
) & rq_allowed_event_types();
108 case MLX5_EVENT_QUEUE_TYPE_SQ
:
109 return BIT(event_type
) & sq_allowed_event_types();
110 case MLX5_EVENT_QUEUE_TYPE_DCT
:
111 return BIT(event_type
) & dct_allowed_event_types();
113 WARN(1, "Event arrived for unknown resource type");
118 static int rsc_event_notifier(struct notifier_block
*nb
,
119 unsigned long type
, void *data
)
121 struct mlx5_core_rsc_common
*common
;
122 struct mlx5_qp_table
*table
;
123 struct mlx5_core_dev
*dev
;
124 struct mlx5_core_dct
*dct
;
125 u8 event_type
= (u8
)type
;
126 struct mlx5_core_qp
*qp
;
127 struct mlx5_priv
*priv
;
128 struct mlx5_eqe
*eqe
;
131 switch (event_type
) {
132 case MLX5_EVENT_TYPE_DCT_DRAINED
:
134 rsn
= be32_to_cpu(eqe
->data
.dct
.dctn
) & 0xffffff;
135 rsn
|= (MLX5_RES_DCT
<< MLX5_USER_INDEX_LEN
);
137 case MLX5_EVENT_TYPE_PATH_MIG
:
138 case MLX5_EVENT_TYPE_COMM_EST
:
139 case MLX5_EVENT_TYPE_SQ_DRAINED
:
140 case MLX5_EVENT_TYPE_SRQ_LAST_WQE
:
141 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
142 case MLX5_EVENT_TYPE_PATH_MIG_FAILED
:
143 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
:
144 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
:
146 rsn
= be32_to_cpu(eqe
->data
.qp_srq
.qp_srq_n
) & 0xffffff;
147 rsn
|= (eqe
->data
.qp_srq
.type
<< MLX5_USER_INDEX_LEN
);
153 table
= container_of(nb
, struct mlx5_qp_table
, nb
);
154 priv
= container_of(table
, struct mlx5_priv
, qp_table
);
155 dev
= container_of(priv
, struct mlx5_core_dev
, priv
);
157 mlx5_core_dbg(dev
, "event (%d) arrived on resource 0x%x\n", eqe
->type
, rsn
);
159 common
= mlx5_get_rsc(table
, rsn
);
161 mlx5_core_warn(dev
, "Async event for bogus resource 0x%x\n", rsn
);
165 if (!is_event_type_allowed((rsn
>> MLX5_USER_INDEX_LEN
), event_type
)) {
166 mlx5_core_warn(dev
, "event 0x%.2x is not allowed on resource 0x%.8x\n",
171 switch (common
->res
) {
175 qp
= (struct mlx5_core_qp
*)common
;
176 qp
->event(qp
, event_type
);
179 dct
= (struct mlx5_core_dct
*)common
;
180 if (event_type
== MLX5_EVENT_TYPE_DCT_DRAINED
)
181 complete(&dct
->drained
);
184 mlx5_core_warn(dev
, "invalid resource type for 0x%x\n", rsn
);
187 mlx5_core_put_rsc(common
);
192 static int create_resource_common(struct mlx5_core_dev
*dev
,
193 struct mlx5_core_qp
*qp
,
196 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
199 qp
->common
.res
= rsc_type
;
200 spin_lock_irq(&table
->lock
);
201 err
= radix_tree_insert(&table
->tree
,
202 qp
->qpn
| (rsc_type
<< MLX5_USER_INDEX_LEN
),
204 spin_unlock_irq(&table
->lock
);
208 atomic_set(&qp
->common
.refcount
, 1);
209 init_completion(&qp
->common
.free
);
210 qp
->pid
= current
->pid
;
215 static void destroy_resource_common(struct mlx5_core_dev
*dev
,
216 struct mlx5_core_qp
*qp
)
218 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
221 spin_lock_irqsave(&table
->lock
, flags
);
222 radix_tree_delete(&table
->tree
,
223 qp
->qpn
| (qp
->common
.res
<< MLX5_USER_INDEX_LEN
));
224 spin_unlock_irqrestore(&table
->lock
, flags
);
225 mlx5_core_put_rsc((struct mlx5_core_rsc_common
*)qp
);
226 wait_for_completion(&qp
->common
.free
);
229 int mlx5_core_create_dct(struct mlx5_core_dev
*dev
,
230 struct mlx5_core_dct
*dct
,
233 u32 out
[MLX5_ST_SZ_DW(create_dct_out
)] = {0};
234 u32 din
[MLX5_ST_SZ_DW(destroy_dct_in
)] = {0};
235 u32 dout
[MLX5_ST_SZ_DW(destroy_dct_out
)] = {0};
236 struct mlx5_core_qp
*qp
= &dct
->mqp
;
239 init_completion(&dct
->drained
);
240 MLX5_SET(create_dct_in
, in
, opcode
, MLX5_CMD_OP_CREATE_DCT
);
242 err
= mlx5_cmd_exec(dev
, in
, inlen
, &out
, sizeof(out
));
244 mlx5_core_warn(dev
, "create DCT failed, ret %d\n", err
);
248 qp
->qpn
= MLX5_GET(create_dct_out
, out
, dctn
);
249 qp
->uid
= MLX5_GET(create_dct_in
, in
, uid
);
250 err
= create_resource_common(dev
, qp
, MLX5_RES_DCT
);
256 MLX5_SET(destroy_dct_in
, din
, opcode
, MLX5_CMD_OP_DESTROY_DCT
);
257 MLX5_SET(destroy_dct_in
, din
, dctn
, qp
->qpn
);
258 MLX5_SET(destroy_dct_in
, din
, uid
, qp
->uid
);
259 mlx5_cmd_exec(dev
, (void *)&in
, sizeof(din
),
260 (void *)&out
, sizeof(dout
));
263 EXPORT_SYMBOL_GPL(mlx5_core_create_dct
);
265 int mlx5_core_create_qp(struct mlx5_core_dev
*dev
,
266 struct mlx5_core_qp
*qp
,
269 u32 out
[MLX5_ST_SZ_DW(create_qp_out
)] = {0};
270 u32 dout
[MLX5_ST_SZ_DW(destroy_qp_out
)];
271 u32 din
[MLX5_ST_SZ_DW(destroy_qp_in
)];
274 MLX5_SET(create_qp_in
, in
, opcode
, MLX5_CMD_OP_CREATE_QP
);
276 err
= mlx5_cmd_exec(dev
, in
, inlen
, out
, sizeof(out
));
280 qp
->uid
= MLX5_GET(create_qp_in
, in
, uid
);
281 qp
->qpn
= MLX5_GET(create_qp_out
, out
, qpn
);
282 mlx5_core_dbg(dev
, "qpn = 0x%x\n", qp
->qpn
);
284 err
= create_resource_common(dev
, qp
, MLX5_RES_QP
);
288 err
= mlx5_debug_qp_add(dev
, qp
);
290 mlx5_core_dbg(dev
, "failed adding QP 0x%x to debug file system\n",
293 atomic_inc(&dev
->num_qps
);
298 memset(din
, 0, sizeof(din
));
299 memset(dout
, 0, sizeof(dout
));
300 MLX5_SET(destroy_qp_in
, din
, opcode
, MLX5_CMD_OP_DESTROY_QP
);
301 MLX5_SET(destroy_qp_in
, din
, qpn
, qp
->qpn
);
302 MLX5_SET(destroy_qp_in
, din
, uid
, qp
->uid
);
303 mlx5_cmd_exec(dev
, din
, sizeof(din
), dout
, sizeof(dout
));
306 EXPORT_SYMBOL_GPL(mlx5_core_create_qp
);
308 static int mlx5_core_drain_dct(struct mlx5_core_dev
*dev
,
309 struct mlx5_core_dct
*dct
)
311 u32 out
[MLX5_ST_SZ_DW(drain_dct_out
)] = {0};
312 u32 in
[MLX5_ST_SZ_DW(drain_dct_in
)] = {0};
313 struct mlx5_core_qp
*qp
= &dct
->mqp
;
315 MLX5_SET(drain_dct_in
, in
, opcode
, MLX5_CMD_OP_DRAIN_DCT
);
316 MLX5_SET(drain_dct_in
, in
, dctn
, qp
->qpn
);
317 MLX5_SET(drain_dct_in
, in
, uid
, qp
->uid
);
318 return mlx5_cmd_exec(dev
, (void *)&in
, sizeof(in
),
319 (void *)&out
, sizeof(out
));
322 int mlx5_core_destroy_dct(struct mlx5_core_dev
*dev
,
323 struct mlx5_core_dct
*dct
)
325 u32 out
[MLX5_ST_SZ_DW(destroy_dct_out
)] = {0};
326 u32 in
[MLX5_ST_SZ_DW(destroy_dct_in
)] = {0};
327 struct mlx5_core_qp
*qp
= &dct
->mqp
;
330 err
= mlx5_core_drain_dct(dev
, dct
);
332 if (dev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
335 mlx5_core_warn(dev
, "failed drain DCT 0x%x with error 0x%x\n", qp
->qpn
, err
);
339 wait_for_completion(&dct
->drained
);
341 destroy_resource_common(dev
, &dct
->mqp
);
342 MLX5_SET(destroy_dct_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_DCT
);
343 MLX5_SET(destroy_dct_in
, in
, dctn
, qp
->qpn
);
344 MLX5_SET(destroy_dct_in
, in
, uid
, qp
->uid
);
345 err
= mlx5_cmd_exec(dev
, (void *)&in
, sizeof(in
),
346 (void *)&out
, sizeof(out
));
349 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct
);
351 int mlx5_core_destroy_qp(struct mlx5_core_dev
*dev
,
352 struct mlx5_core_qp
*qp
)
354 u32 out
[MLX5_ST_SZ_DW(destroy_qp_out
)] = {0};
355 u32 in
[MLX5_ST_SZ_DW(destroy_qp_in
)] = {0};
358 mlx5_debug_qp_remove(dev
, qp
);
360 destroy_resource_common(dev
, qp
);
362 MLX5_SET(destroy_qp_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_QP
);
363 MLX5_SET(destroy_qp_in
, in
, qpn
, qp
->qpn
);
364 MLX5_SET(destroy_qp_in
, in
, uid
, qp
->uid
);
365 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
369 atomic_dec(&dev
->num_qps
);
372 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp
);
374 int mlx5_core_set_delay_drop(struct mlx5_core_dev
*dev
,
377 u32 out
[MLX5_ST_SZ_DW(set_delay_drop_params_out
)] = {0};
378 u32 in
[MLX5_ST_SZ_DW(set_delay_drop_params_in
)] = {0};
380 MLX5_SET(set_delay_drop_params_in
, in
, opcode
,
381 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS
);
382 MLX5_SET(set_delay_drop_params_in
, in
, delay_drop_timeout
,
384 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
386 EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop
);
395 static int mbox_alloc(struct mbox_info
*mbox
, int inlen
, int outlen
)
398 mbox
->outlen
= outlen
;
399 mbox
->in
= kzalloc(mbox
->inlen
, GFP_KERNEL
);
400 mbox
->out
= kzalloc(mbox
->outlen
, GFP_KERNEL
);
401 if (!mbox
->in
|| !mbox
->out
) {
410 static void mbox_free(struct mbox_info
*mbox
)
416 static int modify_qp_mbox_alloc(struct mlx5_core_dev
*dev
, u16 opcode
, int qpn
,
417 u32 opt_param_mask
, void *qpc
,
418 struct mbox_info
*mbox
, u16 uid
)
423 #define MBOX_ALLOC(mbox, typ) \
424 mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
426 #define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
428 MLX5_SET(typ##_in, in, opcode, _opcode); \
429 MLX5_SET(typ##_in, in, qpn, _qpn); \
430 MLX5_SET(typ##_in, in, uid, _uid); \
433 #define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
435 MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
436 MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
437 memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
438 MLX5_ST_SZ_BYTES(qpc)); \
443 case MLX5_CMD_OP_2RST_QP
:
444 if (MBOX_ALLOC(mbox
, qp_2rst
))
446 MOD_QP_IN_SET(qp_2rst
, mbox
->in
, opcode
, qpn
, uid
);
448 case MLX5_CMD_OP_2ERR_QP
:
449 if (MBOX_ALLOC(mbox
, qp_2err
))
451 MOD_QP_IN_SET(qp_2err
, mbox
->in
, opcode
, qpn
, uid
);
454 /* MODIFY with QPC */
455 case MLX5_CMD_OP_RST2INIT_QP
:
456 if (MBOX_ALLOC(mbox
, rst2init_qp
))
458 MOD_QP_IN_SET_QPC(rst2init_qp
, mbox
->in
, opcode
, qpn
,
459 opt_param_mask
, qpc
, uid
);
461 case MLX5_CMD_OP_INIT2RTR_QP
:
462 if (MBOX_ALLOC(mbox
, init2rtr_qp
))
464 MOD_QP_IN_SET_QPC(init2rtr_qp
, mbox
->in
, opcode
, qpn
,
465 opt_param_mask
, qpc
, uid
);
467 case MLX5_CMD_OP_RTR2RTS_QP
:
468 if (MBOX_ALLOC(mbox
, rtr2rts_qp
))
470 MOD_QP_IN_SET_QPC(rtr2rts_qp
, mbox
->in
, opcode
, qpn
,
471 opt_param_mask
, qpc
, uid
);
473 case MLX5_CMD_OP_RTS2RTS_QP
:
474 if (MBOX_ALLOC(mbox
, rts2rts_qp
))
476 MOD_QP_IN_SET_QPC(rts2rts_qp
, mbox
->in
, opcode
, qpn
,
477 opt_param_mask
, qpc
, uid
);
479 case MLX5_CMD_OP_SQERR2RTS_QP
:
480 if (MBOX_ALLOC(mbox
, sqerr2rts_qp
))
482 MOD_QP_IN_SET_QPC(sqerr2rts_qp
, mbox
->in
, opcode
, qpn
,
483 opt_param_mask
, qpc
, uid
);
485 case MLX5_CMD_OP_INIT2INIT_QP
:
486 if (MBOX_ALLOC(mbox
, init2init_qp
))
488 MOD_QP_IN_SET_QPC(init2init_qp
, mbox
->in
, opcode
, qpn
,
489 opt_param_mask
, qpc
, uid
);
492 mlx5_core_err(dev
, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
499 int mlx5_core_qp_modify(struct mlx5_core_dev
*dev
, u16 opcode
,
500 u32 opt_param_mask
, void *qpc
,
501 struct mlx5_core_qp
*qp
)
503 struct mbox_info mbox
;
506 err
= modify_qp_mbox_alloc(dev
, opcode
, qp
->qpn
,
507 opt_param_mask
, qpc
, &mbox
, qp
->uid
);
511 err
= mlx5_cmd_exec(dev
, mbox
.in
, mbox
.inlen
, mbox
.out
, mbox
.outlen
);
515 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify
);
517 void mlx5_init_qp_table(struct mlx5_core_dev
*dev
)
519 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
521 memset(table
, 0, sizeof(*table
));
522 spin_lock_init(&table
->lock
);
523 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
524 mlx5_qp_debugfs_init(dev
);
526 table
->nb
.notifier_call
= rsc_event_notifier
;
527 mlx5_notifier_register(dev
, &table
->nb
);
530 void mlx5_cleanup_qp_table(struct mlx5_core_dev
*dev
)
532 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
534 mlx5_notifier_unregister(dev
, &table
->nb
);
535 mlx5_qp_debugfs_cleanup(dev
);
538 int mlx5_core_qp_query(struct mlx5_core_dev
*dev
, struct mlx5_core_qp
*qp
,
539 u32
*out
, int outlen
)
541 u32 in
[MLX5_ST_SZ_DW(query_qp_in
)] = {0};
543 MLX5_SET(query_qp_in
, in
, opcode
, MLX5_CMD_OP_QUERY_QP
);
544 MLX5_SET(query_qp_in
, in
, qpn
, qp
->qpn
);
545 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, outlen
);
547 EXPORT_SYMBOL_GPL(mlx5_core_qp_query
);
549 int mlx5_core_dct_query(struct mlx5_core_dev
*dev
, struct mlx5_core_dct
*dct
,
550 u32
*out
, int outlen
)
552 u32 in
[MLX5_ST_SZ_DW(query_dct_in
)] = {0};
553 struct mlx5_core_qp
*qp
= &dct
->mqp
;
555 MLX5_SET(query_dct_in
, in
, opcode
, MLX5_CMD_OP_QUERY_DCT
);
556 MLX5_SET(query_dct_in
, in
, dctn
, qp
->qpn
);
558 return mlx5_cmd_exec(dev
, (void *)&in
, sizeof(in
),
559 (void *)out
, outlen
);
561 EXPORT_SYMBOL_GPL(mlx5_core_dct_query
);
563 int mlx5_core_xrcd_alloc(struct mlx5_core_dev
*dev
, u32
*xrcdn
)
565 u32 out
[MLX5_ST_SZ_DW(alloc_xrcd_out
)] = {0};
566 u32 in
[MLX5_ST_SZ_DW(alloc_xrcd_in
)] = {0};
569 MLX5_SET(alloc_xrcd_in
, in
, opcode
, MLX5_CMD_OP_ALLOC_XRCD
);
570 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
572 *xrcdn
= MLX5_GET(alloc_xrcd_out
, out
, xrcd
);
575 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc
);
577 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev
*dev
, u32 xrcdn
)
579 u32 out
[MLX5_ST_SZ_DW(dealloc_xrcd_out
)] = {0};
580 u32 in
[MLX5_ST_SZ_DW(dealloc_xrcd_in
)] = {0};
582 MLX5_SET(dealloc_xrcd_in
, in
, opcode
, MLX5_CMD_OP_DEALLOC_XRCD
);
583 MLX5_SET(dealloc_xrcd_in
, in
, xrcd
, xrcdn
);
584 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
586 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc
);
588 static void destroy_rq_tracked(struct mlx5_core_dev
*dev
, u32 rqn
, u16 uid
)
590 u32 in
[MLX5_ST_SZ_DW(destroy_rq_in
)] = {};
591 u32 out
[MLX5_ST_SZ_DW(destroy_rq_out
)] = {};
593 MLX5_SET(destroy_rq_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_RQ
);
594 MLX5_SET(destroy_rq_in
, in
, rqn
, rqn
);
595 MLX5_SET(destroy_rq_in
, in
, uid
, uid
);
596 mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
599 int mlx5_core_create_rq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
600 struct mlx5_core_qp
*rq
)
605 err
= mlx5_core_create_rq(dev
, in
, inlen
, &rqn
);
609 rq
->uid
= MLX5_GET(create_rq_in
, in
, uid
);
611 err
= create_resource_common(dev
, rq
, MLX5_RES_RQ
);
618 destroy_rq_tracked(dev
, rq
->qpn
, rq
->uid
);
622 EXPORT_SYMBOL(mlx5_core_create_rq_tracked
);
624 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev
*dev
,
625 struct mlx5_core_qp
*rq
)
627 destroy_resource_common(dev
, rq
);
628 destroy_rq_tracked(dev
, rq
->qpn
, rq
->uid
);
630 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked
);
632 static void destroy_sq_tracked(struct mlx5_core_dev
*dev
, u32 sqn
, u16 uid
)
634 u32 in
[MLX5_ST_SZ_DW(destroy_sq_in
)] = {};
635 u32 out
[MLX5_ST_SZ_DW(destroy_sq_out
)] = {};
637 MLX5_SET(destroy_sq_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_SQ
);
638 MLX5_SET(destroy_sq_in
, in
, sqn
, sqn
);
639 MLX5_SET(destroy_sq_in
, in
, uid
, uid
);
640 mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
643 int mlx5_core_create_sq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
644 struct mlx5_core_qp
*sq
)
649 err
= mlx5_core_create_sq(dev
, in
, inlen
, &sqn
);
653 sq
->uid
= MLX5_GET(create_sq_in
, in
, uid
);
655 err
= create_resource_common(dev
, sq
, MLX5_RES_SQ
);
662 destroy_sq_tracked(dev
, sq
->qpn
, sq
->uid
);
666 EXPORT_SYMBOL(mlx5_core_create_sq_tracked
);
668 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev
*dev
,
669 struct mlx5_core_qp
*sq
)
671 destroy_resource_common(dev
, sq
);
672 destroy_sq_tracked(dev
, sq
->qpn
, sq
->uid
);
674 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked
);
676 int mlx5_core_alloc_q_counter(struct mlx5_core_dev
*dev
, u16
*counter_id
)
678 u32 in
[MLX5_ST_SZ_DW(alloc_q_counter_in
)] = {0};
679 u32 out
[MLX5_ST_SZ_DW(alloc_q_counter_out
)] = {0};
682 MLX5_SET(alloc_q_counter_in
, in
, opcode
, MLX5_CMD_OP_ALLOC_Q_COUNTER
);
683 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
685 *counter_id
= MLX5_GET(alloc_q_counter_out
, out
,
689 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter
);
691 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
)
693 u32 in
[MLX5_ST_SZ_DW(dealloc_q_counter_in
)] = {0};
694 u32 out
[MLX5_ST_SZ_DW(dealloc_q_counter_out
)] = {0};
696 MLX5_SET(dealloc_q_counter_in
, in
, opcode
,
697 MLX5_CMD_OP_DEALLOC_Q_COUNTER
);
698 MLX5_SET(dealloc_q_counter_in
, in
, counter_set_id
, counter_id
);
699 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
701 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter
);
703 int mlx5_core_query_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
,
704 int reset
, void *out
, int out_size
)
706 u32 in
[MLX5_ST_SZ_DW(query_q_counter_in
)] = {0};
708 MLX5_SET(query_q_counter_in
, in
, opcode
, MLX5_CMD_OP_QUERY_Q_COUNTER
);
709 MLX5_SET(query_q_counter_in
, in
, clear
, reset
);
710 MLX5_SET(query_q_counter_in
, in
, counter_set_id
, counter_id
);
711 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, out_size
);
713 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter
);
715 struct mlx5_core_rsc_common
*mlx5_core_res_hold(struct mlx5_core_dev
*dev
,
717 enum mlx5_res_type res_type
)
719 u32 rsn
= res_num
| (res_type
<< MLX5_USER_INDEX_LEN
);
720 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
722 return mlx5_get_rsc(table
, rsn
);
724 EXPORT_SYMBOL_GPL(mlx5_core_res_hold
);
/* Release a reference obtained via mlx5_core_res_hold(). */
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}
EXPORT_SYMBOL_GPL(mlx5_core_res_put);