/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}
static u64 qp_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);

	return mask;
}
static u64 rq_allowed_event_types(void)
{
	u64 mask;

	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);

	return mask;
}
static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
static bool is_event_type_allowed(int rsc_type, int event_type)
{
	switch (rsc_type) {
	case MLX5_EVENT_QUEUE_TYPE_QP:
		return BIT(event_type) & qp_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_RQ:
		return BIT(event_type) & rq_allowed_event_types();
	case MLX5_EVENT_QUEUE_TYPE_SQ:
		return BIT(event_type) & sq_allowed_event_types();
	default:
		WARN(1, "Event arrived for unknown resource type");
		return false;
	}
}
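/* QPs, RQs and SQs share one radix tree, keyed by
 * qpn | (resource type << MLX5_USER_INDEX_LEN) (see create_qprqsq_common()).
 * The type bits carried in the upper part of the rsn are used both to look
 * the resource up and to check that a firmware event is legal for it.
 */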
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
			       event_type, rsn);
		goto out;
	}

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;

	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}

out:
	/* Drop the reference taken by mlx5_get_rsc() on every path. */
	mlx5_core_put_rsc(common);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
	struct mlx5_core_qp *qp =
		container_of(common, struct mlx5_core_qp, common);
	struct mlx5_pagefault pfault;

	if (!qp) {
		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
			       qpn);
		return;
	}

	pfault.event_subtype = eqe->sub_type;
	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
	pfault.bytes_committed = be32_to_cpu(
		pf_eqe->bytes_committed);

	mlx5_core_dbg(dev,
		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
		      eqe->sub_type, pfault.flags);

	switch (eqe->sub_type) {
	case MLX5_PFAULT_SUBTYPE_RDMA:
		/* RDMA based event */
		pfault.rdma.r_key =
			be32_to_cpu(pf_eqe->rdma.r_key);
		pfault.rdma.packet_size =
			be16_to_cpu(pf_eqe->rdma.packet_length);
		pfault.rdma.rdma_op_len =
			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
		pfault.rdma.rdma_va =
			be64_to_cpu(pf_eqe->rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
			      qpn, pfault.rdma.r_key);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
			      pfault.rdma.rdma_op_len);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
			      pfault.rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	case MLX5_PFAULT_SUBTYPE_WQE:
		/* WQE based event */
		pfault.wqe.wqe_index =
			be16_to_cpu(pf_eqe->wqe.wqe_index);
		pfault.wqe.packet_size =
			be16_to_cpu(pf_eqe->wqe.packet_length);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
			      qpn, pfault.wqe.wqe_index);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	default:
		mlx5_core_warn(dev,
			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
			       eqe->sub_type, qpn);
		/* Unsupported page faults should still be resolved by the
		 * page fault handler
		 */
	}

	if (qp->pfault_handler) {
		qp->pfault_handler(qp, &pfault);
	} else {
		mlx5_core_err(dev,
			      "ODP event for QP %08x, without a fault handler in QP\n",
			      qpn);
		/* Page fault will remain unresolved. QP will hang until it is
		 * destroyed
		 */
	}

	mlx5_core_put_rsc(common);
}
#endif
static int create_qprqsq_common(struct mlx5_core_dev *dev,
				struct mlx5_core_qp *qp,
				int rsc_type)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	atomic_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);
}
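/* Teardown order matters: the radix tree entry is removed first so no new
 * lookup can take a reference, then the initial reference from
 * create_qprqsq_common() is dropped, and we block until any in-flight event
 * handler releases the last reference and completes ->free.
 */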
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen)
{
	struct mlx5_create_qp_mbox_out out;
	struct mlx5_destroy_qp_mbox_in din;
	struct mlx5_destroy_qp_mbox_out dout;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "ret %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_warn(dev, "current num of QPs 0x%x\n",
			       atomic_read(&dev->num_qps));
		return mlx5_cmd_status_to_err(&out.hdr);
	}

	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	atomic_inc(&dev->num_qps);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	din.qpn = cpu_to_be32(qp->qpn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
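/* A minimal caller sketch (hypothetical, for illustration only - real users
 * such as mlx5_ib build the mailbox from the verbs layer):
 *
 *	struct mlx5_create_qp_mbox_in *in;
 *	struct mlx5_core_qp qp = {};
 *	int inlen = sizeof(*in);	// plus the pas array in real code
 *	int err;
 *
 *	in = mlx5_vzalloc(inlen);
 *	if (!in)
 *		return -ENOMEM;
 *	// ... fill the QP context in 'in' ...
 *	err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *	kvfree(in);
 *	// ... use the QP; on teardown:
 *	mlx5_core_destroy_qp(dev, &qp);
 */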
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	struct mlx5_destroy_qp_mbox_in in;
	struct mlx5_destroy_qp_mbox_out out;
	int err;

	mlx5_debug_qp_remove(dev, qp);

	destroy_qprqsq_common(dev, qp);

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp)
{
	struct mlx5_modify_qp_mbox_out out;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(operation);
	in->qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen)
{
	struct mlx5_query_qp_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	struct mlx5_alloc_xrcd_mbox_in in;
	struct mlx5_alloc_xrcd_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);
	else
		*xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	struct mlx5_dealloc_xrcd_mbox_in in;
	struct mlx5_dealloc_xrcd_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
	in.xrcdn = cpu_to_be32(xrcdn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 flags, int error)
{
	struct mlx5_page_fault_resume_mbox_in in;
	struct mlx5_page_fault_resume_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
	in.hdr.opmod = 0;
	flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
		  MLX5_PAGE_FAULT_RESUME_WRITE	   |
		  MLX5_PAGE_FAULT_RESUME_RDMA);
	flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
	in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
				   (flags << MLX5_QPN_BITS));
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
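/* RQs and SQs created through the transobj interface (mlx5_core_create_rq()
 * and mlx5_core_create_sq()) are tracked below with the same mlx5_core_qp
 * structure and the same radix tree as real QPs, so async event dispatch and
 * teardown share one code path.
 */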
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
	if (err)
		return err;

	rq->qpn = rqn;
	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5_core_destroy_rq(dev, rq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq)
{
	destroy_qprqsq_common(dev, rq);
	mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	int err;
	u32 sqn;

	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
	if (err)
		return err;

	sq->qpn = sqn;
	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	mlx5_core_destroy_sq(dev, sq->qpn);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_qprqsq_common(dev, sq);
	mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*counter_id = MLX5_GET(alloc_q_counter_out, out,
				       counter_set_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
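/* Typical counter lifecycle (a sketch; error handling elided):
 *
 *	u16 counter_id;
 *	u32 out_of_buffer;
 *
 *	mlx5_core_alloc_q_counter(dev, &counter_id);
 *	// ... attach counter_id to QPs via the QP context ...
 *	mlx5_core_query_out_of_buffer(dev, counter_id, &out_of_buffer);
 *	mlx5_core_dealloc_q_counter(dev, counter_id);
 */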
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
				  u32 *out_of_buffer)
{
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
	if (!err)
		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
					  out_of_buffer);

	kvfree(out);
	return err;
}