2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/gfp.h>
35 #include <linux/export.h>
36 #include <linux/mlx5/cmd.h>
37 #include <linux/mlx5/qp.h>
38 #include <linux/mlx5/driver.h>
39 #include <linux/mlx5/transobj.h>
41 #include "mlx5_core.h"
43 static struct mlx5_core_rsc_common
*mlx5_get_rsc(struct mlx5_core_dev
*dev
,
46 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
47 struct mlx5_core_rsc_common
*common
;
49 spin_lock(&table
->lock
);
51 common
= radix_tree_lookup(&table
->tree
, rsn
);
53 atomic_inc(&common
->refcount
);
55 spin_unlock(&table
->lock
);
58 mlx5_core_warn(dev
, "Async event for bogus resource 0x%x\n",
65 void mlx5_core_put_rsc(struct mlx5_core_rsc_common
*common
)
67 if (atomic_dec_and_test(&common
->refcount
))
68 complete(&common
->free
);
71 static u64
qp_allowed_event_types(void)
75 mask
= BIT(MLX5_EVENT_TYPE_PATH_MIG
) |
76 BIT(MLX5_EVENT_TYPE_COMM_EST
) |
77 BIT(MLX5_EVENT_TYPE_SQ_DRAINED
) |
78 BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE
) |
79 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
) |
80 BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED
) |
81 BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
) |
82 BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
);
87 static u64
rq_allowed_event_types(void)
91 mask
= BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE
) |
92 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
);
97 static u64
sq_allowed_event_types(void)
99 return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR
);
102 static bool is_event_type_allowed(int rsc_type
, int event_type
)
105 case MLX5_EVENT_QUEUE_TYPE_QP
:
106 return BIT(event_type
) & qp_allowed_event_types();
107 case MLX5_EVENT_QUEUE_TYPE_RQ
:
108 return BIT(event_type
) & rq_allowed_event_types();
109 case MLX5_EVENT_QUEUE_TYPE_SQ
:
110 return BIT(event_type
) & sq_allowed_event_types();
112 WARN(1, "Event arrived for unknown resource type");
117 void mlx5_rsc_event(struct mlx5_core_dev
*dev
, u32 rsn
, int event_type
)
119 struct mlx5_core_rsc_common
*common
= mlx5_get_rsc(dev
, rsn
);
120 struct mlx5_core_qp
*qp
;
125 if (!is_event_type_allowed((rsn
>> MLX5_USER_INDEX_LEN
), event_type
)) {
126 mlx5_core_warn(dev
, "event 0x%.2x is not allowed on resource 0x%.8x\n",
131 switch (common
->res
) {
135 qp
= (struct mlx5_core_qp
*)common
;
136 qp
->event(qp
, event_type
);
140 mlx5_core_warn(dev
, "invalid resource type for 0x%x\n", rsn
);
143 mlx5_core_put_rsc(common
);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Decode an ODP page-fault EQE, translate it into a struct mlx5_pagefault
 * and hand it to the QP's registered pfault_handler.  Takes (and drops) a
 * reference on the QP for the duration of the dispatch.
 */
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
	/* container_of() is pure pointer arithmetic, so computing it before
	 * the NULL check below is safe as long as qp is not dereferenced.
	 */
	struct mlx5_core_qp *qp =
		container_of(common, struct mlx5_core_qp, common);
	struct mlx5_pagefault pfault;

	if (!common) {
		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
			       qpn);
		return;
	}

	pfault.event_subtype = eqe->sub_type;
	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
	pfault.bytes_committed = be32_to_cpu(
		pf_eqe->bytes_committed);

	mlx5_core_dbg(dev,
		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
		      eqe->sub_type, pfault.flags);

	switch (eqe->sub_type) {
	case MLX5_PFAULT_SUBTYPE_RDMA:
		/* RDMA based event */
		pfault.rdma.r_key =
			be32_to_cpu(pf_eqe->rdma.r_key);
		pfault.rdma.packet_size =
			be16_to_cpu(pf_eqe->rdma.packet_length);
		pfault.rdma.rdma_op_len =
			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
		pfault.rdma.rdma_va =
			be64_to_cpu(pf_eqe->rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
			      qpn, pfault.rdma.r_key);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
			      pfault.rdma.rdma_op_len);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
			      pfault.rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	case MLX5_PFAULT_SUBTYPE_WQE:
		/* WQE based event */
		pfault.wqe.wqe_index =
			be16_to_cpu(pf_eqe->wqe.wqe_index);
		pfault.wqe.packet_size =
			be16_to_cpu(pf_eqe->wqe.packet_length);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
			      qpn, pfault.wqe.wqe_index);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	default:
		mlx5_core_warn(dev,
			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
			       eqe->sub_type, qpn);
		/* Unsupported page faults should still be resolved by the
		 * page fault handler
		 */
	}

	if (qp->pfault_handler) {
		qp->pfault_handler(qp, &pfault);
	} else {
		mlx5_core_err(dev,
			      "ODP event for QP %08x, without a fault handler in QP\n",
			      qpn);
		/* Page fault will remain unresolved. QP will hang until it is
		 * destroyed
		 */
	}

	mlx5_core_put_rsc(common);
}
#endif
235 static int create_qprqsq_common(struct mlx5_core_dev
*dev
,
236 struct mlx5_core_qp
*qp
,
239 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
242 qp
->common
.res
= rsc_type
;
243 spin_lock_irq(&table
->lock
);
244 err
= radix_tree_insert(&table
->tree
,
245 qp
->qpn
| (rsc_type
<< MLX5_USER_INDEX_LEN
),
247 spin_unlock_irq(&table
->lock
);
251 atomic_set(&qp
->common
.refcount
, 1);
252 init_completion(&qp
->common
.free
);
253 qp
->pid
= current
->pid
;
258 static void destroy_qprqsq_common(struct mlx5_core_dev
*dev
,
259 struct mlx5_core_qp
*qp
)
261 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
264 spin_lock_irqsave(&table
->lock
, flags
);
265 radix_tree_delete(&table
->tree
,
266 qp
->qpn
| (qp
->common
.res
<< MLX5_USER_INDEX_LEN
));
267 spin_unlock_irqrestore(&table
->lock
, flags
);
268 mlx5_core_put_rsc((struct mlx5_core_rsc_common
*)qp
);
269 wait_for_completion(&qp
->common
.free
);
272 int mlx5_core_create_qp(struct mlx5_core_dev
*dev
,
273 struct mlx5_core_qp
*qp
,
276 u32 out
[MLX5_ST_SZ_DW(create_qp_out
)] = {0};
277 u32 dout
[MLX5_ST_SZ_DW(destroy_qp_out
)];
278 u32 din
[MLX5_ST_SZ_DW(destroy_qp_in
)];
281 MLX5_SET(create_qp_in
, in
, opcode
, MLX5_CMD_OP_CREATE_QP
);
283 err
= mlx5_cmd_exec(dev
, in
, inlen
, out
, sizeof(out
));
284 err
= err
? : mlx5_cmd_status_to_err_v2(out
);
288 qp
->qpn
= MLX5_GET(create_qp_out
, out
, qpn
);
289 mlx5_core_dbg(dev
, "qpn = 0x%x\n", qp
->qpn
);
291 err
= create_qprqsq_common(dev
, qp
, MLX5_RES_QP
);
295 err
= mlx5_debug_qp_add(dev
, qp
);
297 mlx5_core_dbg(dev
, "failed adding QP 0x%x to debug file system\n",
300 atomic_inc(&dev
->num_qps
);
305 memset(din
, 0, sizeof(din
));
306 memset(dout
, 0, sizeof(dout
));
307 MLX5_SET(destroy_qp_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_QP
);
308 MLX5_SET(destroy_qp_in
, in
, qpn
, qp
->qpn
);
309 mlx5_cmd_exec(dev
, din
, sizeof(din
), dout
, sizeof(dout
));
310 mlx5_cmd_status_to_err_v2(dout
);
313 EXPORT_SYMBOL_GPL(mlx5_core_create_qp
);
315 int mlx5_core_destroy_qp(struct mlx5_core_dev
*dev
,
316 struct mlx5_core_qp
*qp
)
318 u32 out
[MLX5_ST_SZ_DW(destroy_qp_out
)] = {0};
319 u32 in
[MLX5_ST_SZ_DW(destroy_qp_in
)] = {0};
322 mlx5_debug_qp_remove(dev
, qp
);
324 destroy_qprqsq_common(dev
, qp
);
326 MLX5_SET(destroy_qp_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_QP
);
327 MLX5_SET(destroy_qp_in
, in
, qpn
, qp
->qpn
);
328 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
329 err
= err
? : mlx5_cmd_status_to_err_v2(out
);
333 atomic_dec(&dev
->num_qps
);
336 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp
);
345 static int mbox_alloc(struct mbox_info
*mbox
, int inlen
, int outlen
)
348 mbox
->outlen
= outlen
;
349 mbox
->in
= kzalloc(mbox
->inlen
, GFP_KERNEL
);
350 mbox
->out
= kzalloc(mbox
->outlen
, GFP_KERNEL
);
351 if (!mbox
->in
|| !mbox
->out
) {
360 static void mbox_free(struct mbox_info
*mbox
)
366 static int modify_qp_mbox_alloc(struct mlx5_core_dev
*dev
, u16 opcode
, int qpn
,
367 u32 opt_param_mask
, void *qpc
,
368 struct mbox_info
*mbox
)
373 #define MBOX_ALLOC(mbox, typ) \
374 mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
376 #define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
377 MLX5_SET(typ##_in, in, opcode, _opcode); \
378 MLX5_SET(typ##_in, in, qpn, _qpn)
380 #define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
381 MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
382 MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
383 memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
387 case MLX5_CMD_OP_2RST_QP
:
388 if (MBOX_ALLOC(mbox
, qp_2rst
))
390 MOD_QP_IN_SET(qp_2rst
, mbox
->in
, opcode
, qpn
);
392 case MLX5_CMD_OP_2ERR_QP
:
393 if (MBOX_ALLOC(mbox
, qp_2err
))
395 MOD_QP_IN_SET(qp_2err
, mbox
->in
, opcode
, qpn
);
398 /* MODIFY with QPC */
399 case MLX5_CMD_OP_RST2INIT_QP
:
400 if (MBOX_ALLOC(mbox
, rst2init_qp
))
402 MOD_QP_IN_SET_QPC(rst2init_qp
, mbox
->in
, opcode
, qpn
,
403 opt_param_mask
, qpc
);
405 case MLX5_CMD_OP_INIT2RTR_QP
:
406 if (MBOX_ALLOC(mbox
, init2rtr_qp
))
408 MOD_QP_IN_SET_QPC(init2rtr_qp
, mbox
->in
, opcode
, qpn
,
409 opt_param_mask
, qpc
);
411 case MLX5_CMD_OP_RTR2RTS_QP
:
412 if (MBOX_ALLOC(mbox
, rtr2rts_qp
))
414 MOD_QP_IN_SET_QPC(rtr2rts_qp
, mbox
->in
, opcode
, qpn
,
415 opt_param_mask
, qpc
);
417 case MLX5_CMD_OP_RTS2RTS_QP
:
418 if (MBOX_ALLOC(mbox
, rts2rts_qp
))
420 MOD_QP_IN_SET_QPC(rts2rts_qp
, mbox
->in
, opcode
, qpn
,
421 opt_param_mask
, qpc
);
423 case MLX5_CMD_OP_SQERR2RTS_QP
:
424 if (MBOX_ALLOC(mbox
, sqerr2rts_qp
))
426 MOD_QP_IN_SET_QPC(sqerr2rts_qp
, mbox
->in
, opcode
, qpn
,
427 opt_param_mask
, qpc
);
429 case MLX5_CMD_OP_INIT2INIT_QP
:
430 if (MBOX_ALLOC(mbox
, init2init_qp
))
432 MOD_QP_IN_SET_QPC(init2init_qp
, mbox
->in
, opcode
, qpn
,
433 opt_param_mask
, qpc
);
436 mlx5_core_err(dev
, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
443 int mlx5_core_qp_modify(struct mlx5_core_dev
*dev
, u16 opcode
,
444 u32 opt_param_mask
, void *qpc
,
445 struct mlx5_core_qp
*qp
)
447 struct mbox_info mbox
;
450 err
= modify_qp_mbox_alloc(dev
, opcode
, qp
->qpn
,
451 opt_param_mask
, qpc
, &mbox
);
455 err
= mlx5_cmd_exec(dev
, mbox
.in
, mbox
.inlen
, mbox
.out
, mbox
.outlen
);
456 err
= err
? : mlx5_cmd_status_to_err_v2(mbox
.out
);
460 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify
);
462 void mlx5_init_qp_table(struct mlx5_core_dev
*dev
)
464 struct mlx5_qp_table
*table
= &dev
->priv
.qp_table
;
466 memset(table
, 0, sizeof(*table
));
467 spin_lock_init(&table
->lock
);
468 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
469 mlx5_qp_debugfs_init(dev
);
/* Counterpart of mlx5_init_qp_table(): tear down the QP debugfs entries. */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
477 int mlx5_core_qp_query(struct mlx5_core_dev
*dev
, struct mlx5_core_qp
*qp
,
478 u32
*out
, int outlen
)
480 u32 in
[MLX5_ST_SZ_DW(query_qp_in
)] = {0};
483 MLX5_SET(query_qp_in
, in
, opcode
, MLX5_CMD_OP_QUERY_QP
);
484 MLX5_SET(query_qp_in
, in
, qpn
, qp
->qpn
);
486 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, outlen
);
487 return err
? : mlx5_cmd_status_to_err_v2(out
);
489 EXPORT_SYMBOL_GPL(mlx5_core_qp_query
);
491 int mlx5_core_xrcd_alloc(struct mlx5_core_dev
*dev
, u32
*xrcdn
)
493 u32 out
[MLX5_ST_SZ_DW(alloc_xrcd_out
)] = {0};
494 u32 in
[MLX5_ST_SZ_DW(alloc_xrcd_in
)] = {0};
497 MLX5_SET(alloc_xrcd_in
, in
, opcode
, MLX5_CMD_OP_ALLOC_XRCD
);
498 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
499 err
= err
? : mlx5_cmd_status_to_err_v2(out
);
501 *xrcdn
= MLX5_GET(alloc_xrcd_out
, out
, xrcd
);
504 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc
);
506 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev
*dev
, u32 xrcdn
)
508 u32 out
[MLX5_ST_SZ_DW(dealloc_xrcd_out
)] = {0};
509 u32 in
[MLX5_ST_SZ_DW(dealloc_xrcd_in
)] = {0};
512 MLX5_SET(dealloc_xrcd_in
, in
, opcode
, MLX5_CMD_OP_DEALLOC_XRCD
);
513 MLX5_SET(dealloc_xrcd_in
, in
, xrcd
, xrcdn
);
514 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
515 return err
? : mlx5_cmd_status_to_err_v2(out
);
517 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc
);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Tell firmware to resume (or abort, when @error is set) QP @qpn after an
 * ODP page fault has been handled.  @flags selects which fault direction
 * (requestor/write/RDMA) is being resumed.
 */
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 flags, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
	int err;

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);

	MLX5_SET(page_fault_resume_in, in, qpn, qpn);

	if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
		MLX5_SET(page_fault_resume_in, in, req_res, 1);
	if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
		MLX5_SET(page_fault_resume_in, in, read_write, 1);
	if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
		MLX5_SET(page_fault_resume_in, in, rdma, 1);

	if (error)
		MLX5_SET(page_fault_resume_in, in, error, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	return err ? : mlx5_cmd_status_to_err_v2(out);
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
547 int mlx5_core_create_rq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
548 struct mlx5_core_qp
*rq
)
553 err
= mlx5_core_create_rq(dev
, in
, inlen
, &rqn
);
558 err
= create_qprqsq_common(dev
, rq
, MLX5_RES_RQ
);
565 mlx5_core_destroy_rq(dev
, rq
->qpn
);
569 EXPORT_SYMBOL(mlx5_core_create_rq_tracked
);
571 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev
*dev
,
572 struct mlx5_core_qp
*rq
)
574 destroy_qprqsq_common(dev
, rq
);
575 mlx5_core_destroy_rq(dev
, rq
->qpn
);
577 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked
);
579 int mlx5_core_create_sq_tracked(struct mlx5_core_dev
*dev
, u32
*in
, int inlen
,
580 struct mlx5_core_qp
*sq
)
585 err
= mlx5_core_create_sq(dev
, in
, inlen
, &sqn
);
590 err
= create_qprqsq_common(dev
, sq
, MLX5_RES_SQ
);
597 mlx5_core_destroy_sq(dev
, sq
->qpn
);
601 EXPORT_SYMBOL(mlx5_core_create_sq_tracked
);
603 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev
*dev
,
604 struct mlx5_core_qp
*sq
)
606 destroy_qprqsq_common(dev
, sq
);
607 mlx5_core_destroy_sq(dev
, sq
->qpn
);
609 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked
);
611 int mlx5_core_alloc_q_counter(struct mlx5_core_dev
*dev
, u16
*counter_id
)
613 u32 in
[MLX5_ST_SZ_DW(alloc_q_counter_in
)] = {0};
614 u32 out
[MLX5_ST_SZ_DW(alloc_q_counter_out
)] = {0};
617 MLX5_SET(alloc_q_counter_in
, in
, opcode
, MLX5_CMD_OP_ALLOC_Q_COUNTER
);
618 err
= mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, sizeof(out
));
620 *counter_id
= MLX5_GET(alloc_q_counter_out
, out
,
624 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter
);
626 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
)
628 u32 in
[MLX5_ST_SZ_DW(dealloc_q_counter_in
)] = {0};
629 u32 out
[MLX5_ST_SZ_DW(dealloc_q_counter_out
)] = {0};
631 MLX5_SET(dealloc_q_counter_in
, in
, opcode
,
632 MLX5_CMD_OP_DEALLOC_Q_COUNTER
);
633 MLX5_SET(dealloc_q_counter_in
, in
, counter_set_id
, counter_id
);
634 return mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
,
637 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter
);
639 int mlx5_core_query_q_counter(struct mlx5_core_dev
*dev
, u16 counter_id
,
640 int reset
, void *out
, int out_size
)
642 u32 in
[MLX5_ST_SZ_DW(query_q_counter_in
)] = {0};
644 MLX5_SET(query_q_counter_in
, in
, opcode
, MLX5_CMD_OP_QUERY_Q_COUNTER
);
645 MLX5_SET(query_q_counter_in
, in
, clear
, reset
);
646 MLX5_SET(query_q_counter_in
, in
, counter_set_id
, counter_id
);
647 return mlx5_cmd_exec_check_status(dev
, in
, sizeof(in
), out
, out_size
);
649 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter
);
651 int mlx5_core_query_out_of_buffer(struct mlx5_core_dev
*dev
, u16 counter_id
,
654 int outlen
= MLX5_ST_SZ_BYTES(query_q_counter_out
);
658 out
= mlx5_vzalloc(outlen
);
662 err
= mlx5_core_query_q_counter(dev
, counter_id
, 0, out
, outlen
);
664 *out_of_buffer
= MLX5_GET(query_q_counter_out
, out
,